langchain 0.0.185 → 0.0.186

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/agents/openai/index.cjs +2 -1
  2. package/dist/agents/openai/index.js +2 -1
  3. package/dist/chains/combine_docs_chain.cjs +1 -1
  4. package/dist/chains/combine_docs_chain.js +1 -1
  5. package/dist/chains/llm_chain.cjs +52 -7
  6. package/dist/chains/llm_chain.d.ts +20 -12
  7. package/dist/chains/llm_chain.js +53 -8
  8. package/dist/chat_models/ollama.cjs +8 -0
  9. package/dist/chat_models/ollama.d.ts +3 -0
  10. package/dist/chat_models/ollama.js +8 -0
  11. package/dist/chat_models/openai.cjs +3 -0
  12. package/dist/chat_models/openai.js +3 -0
  13. package/dist/document_loaders/fs/unstructured.d.ts +1 -5
  14. package/dist/embeddings/ollama.d.ts +1 -1
  15. package/dist/llms/ollama.cjs +8 -0
  16. package/dist/llms/ollama.d.ts +3 -0
  17. package/dist/llms/ollama.js +8 -0
  18. package/dist/llms/openai.cjs +1 -1
  19. package/dist/llms/openai.js +1 -1
  20. package/dist/output_parsers/index.cjs +3 -1
  21. package/dist/output_parsers/index.d.ts +1 -0
  22. package/dist/output_parsers/index.js +1 -0
  23. package/dist/output_parsers/openai_functions.cjs +3 -3
  24. package/dist/output_parsers/openai_functions.js +3 -3
  25. package/dist/output_parsers/openai_tools.cjs +53 -0
  26. package/dist/output_parsers/openai_tools.d.ts +22 -0
  27. package/dist/output_parsers/openai_tools.js +49 -0
  28. package/dist/prompts/base.d.ts +2 -1
  29. package/dist/prompts/chat.cjs +1 -1
  30. package/dist/prompts/chat.js +1 -1
  31. package/dist/schema/index.d.ts +2 -4
  32. package/dist/schema/runnable/base.d.ts +2 -2
  33. package/dist/util/ollama.d.ts +3 -0
  34. package/dist/util/types.cjs +5 -0
  35. package/dist/util/types.d.ts +4 -0
  36. package/dist/util/types.js +4 -0
  37. package/package.json +1 -1
package/dist/agents/openai/index.cjs CHANGED
@@ -143,7 +143,8 @@ class OpenAIAgent extends agent_js_1.Agent {
          const valuesForLLM = {
              functions: this.tools.map(convert_to_openai_js_1.formatToOpenAIFunction),
          };
-         for (const key of this.llmChain.llm.callKeys) {
+         const callKeys = "callKeys" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];
+         for (const key of callKeys) {
              if (key in inputs) {
                  valuesForLLM[key] = inputs[key];
                  delete valuesForPrompt[key];
package/dist/agents/openai/index.js CHANGED
@@ -139,7 +139,8 @@ export class OpenAIAgent extends Agent {
          const valuesForLLM = {
              functions: this.tools.map(formatToOpenAIFunction),
          };
-         for (const key of this.llmChain.llm.callKeys) {
+         const callKeys = "callKeys" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];
+         for (const key of callKeys) {
              if (key in inputs) {
                  valuesForLLM[key] = inputs[key];
                  delete valuesForPrompt[key];
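
Note on the two hunks above: `llmChain.llm` may now be any Runnable rather than a `BaseLanguageModel` (see the `llm_chain` changes below), and a plain Runnable exposes no `callKeys` property, so the agent guards with an `in` check and falls back to an empty list before copying call options out of the inputs.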
package/dist/chains/combine_docs_chain.cjs CHANGED
@@ -177,7 +177,7 @@ class MapReduceDocumentsChain extends base_js_1.BaseChain {
              [this.combineDocumentChain.inputKey]: currentDocs,
              ...rest,
          }));
-         const length = await this.combineDocumentChain.llmChain.llm.getNumTokens(formatted);
+         const length = await this.combineDocumentChain.llmChain._getNumTokens(formatted);
          const withinTokenLimit = length < this.maxTokens;
          // If we can skip the map step, and we're within the token limit, we don't
          // need to run the map step, so just break out of the loop.
package/dist/chains/combine_docs_chain.js CHANGED
@@ -173,7 +173,7 @@ export class MapReduceDocumentsChain extends BaseChain {
              [this.combineDocumentChain.inputKey]: currentDocs,
              ...rest,
          }));
-         const length = await this.combineDocumentChain.llmChain.llm.getNumTokens(formatted);
+         const length = await this.combineDocumentChain.llmChain._getNumTokens(formatted);
          const withinTokenLimit = length < this.maxTokens;
          // If we can skip the map step, and we're within the token limit, we don't
          // need to run the map step, so just break out of the loop.
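
Note: the map-reduce length check now counts tokens through the new `LLMChain._getNumTokens` helper (added below in `llm_chain`), which can locate the underlying model's tokenizer even when `llmChain.llm` is a wrapped Runnable rather than a bare model.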
package/dist/chains/llm_chain.cjs CHANGED
@@ -5,6 +5,29 @@ const base_js_1 = require("./base.cjs");
  const base_js_2 = require("../prompts/base.cjs");
  const index_js_1 = require("../base_language/index.cjs");
  const noop_js_1 = require("../output_parsers/noop.cjs");
+ const base_js_3 = require("../schema/runnable/base.cjs");
+ function isBaseLanguageModel(llmLike) {
+     return typeof llmLike._llmType === "function";
+ }
+ function _getLanguageModel(llmLike) {
+     if (isBaseLanguageModel(llmLike)) {
+         return llmLike;
+     }
+     else if ("bound" in llmLike && base_js_3.Runnable.isRunnable(llmLike.bound)) {
+         return _getLanguageModel(llmLike.bound);
+     }
+     else if ("runnable" in llmLike &&
+         "fallbacks" in llmLike &&
+         base_js_3.Runnable.isRunnable(llmLike.runnable)) {
+         return _getLanguageModel(llmLike.runnable);
+     }
+     else if ("default" in llmLike && base_js_3.Runnable.isRunnable(llmLike.default)) {
+         return _getLanguageModel(llmLike.default);
+     }
+     else {
+         throw new Error("Unable to extract BaseLanguageModel from llmLike object.");
+     }
+ }
  /**
   * Chain to run queries against LLMs.
   *
@@ -79,10 +102,15 @@ class LLMChain extends base_js_1.BaseChain {
              this.outputParser = this.prompt.outputParser;
          }
      }
+     getCallKeys() {
+         const callKeys = "callKeys" in this.llm ? this.llm.callKeys : [];
+         return callKeys;
+     }
      /** @ignore */
      _selectMemoryInputs(values) {
          const valuesForMemory = super._selectMemoryInputs(values);
-         for (const key of this.llm.callKeys) {
+         const callKeys = this.getCallKeys();
+         for (const key of callKeys) {
              if (key in values) {
                  delete valuesForMemory[key];
              }
@@ -114,16 +142,29 @@ class LLMChain extends base_js_1.BaseChain {
          const valuesForLLM = {
              ...this.llmKwargs,
          };
-         for (const key of this.llm.callKeys) {
+         const callKeys = this.getCallKeys();
+         for (const key of callKeys) {
              if (key in values) {
-                 valuesForLLM[key] = values[key];
-                 delete valuesForPrompt[key];
+                 if (valuesForLLM) {
+                     valuesForLLM[key] =
+                         values[key];
+                     delete valuesForPrompt[key];
+                 }
              }
          }
          const promptValue = await this.prompt.formatPromptValue(valuesForPrompt);
-         const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+         if ("generatePrompt" in this.llm) {
+             const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+             return {
+                 [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+             };
+         }
+         const modelWithParser = this.outputParser
+             ? this.llm.pipe(this.outputParser)
+             : this.llm;
+         const response = await modelWithParser.invoke(promptValue, runManager?.getChild());
          return {
-             [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+             [this.outputKey]: response,
          };
      }
      /**
@@ -160,11 +201,15 @@ class LLMChain extends base_js_1.BaseChain {
      }
      /** @deprecated */
      serialize() {
+         const serialize = "serialize" in this.llm ? this.llm.serialize() : undefined;
          return {
              _type: `${this._chainType()}_chain`,
-             llm: this.llm.serialize(),
+             llm: serialize,
              prompt: this.prompt.serialize(),
          };
      }
+     _getNumTokens(text) {
+         return _getLanguageModel(this.llm).getNumTokens(text);
+     }
  }
  exports.LLMChain = LLMChain;
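
Note: `_getLanguageModel` recursively unwraps the common Runnable wrappers — a binding's `bound`, a `RunnableWithFallbacks`' `runnable`/`fallbacks` pair, and a configurable runnable's `default` — until it reaches an object with an `_llmType` method, i.e. a concrete `BaseLanguageModel`. This is what lets `_getNumTokens` keep working after the `llm` field is widened to accept arbitrary Runnables.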
package/dist/chains/llm_chain.d.ts CHANGED
@@ -1,21 +1,26 @@
  import { BaseChain, ChainInputs } from "./base.js";
  import { BasePromptTemplate } from "../prompts/base.js";
- import { BaseLanguageModel } from "../base_language/index.js";
- import { ChainValues, Generation, BasePromptValue } from "../schema/index.js";
+ import { BaseLanguageModel, BaseLanguageModelInput } from "../base_language/index.js";
+ import { ChainValues, Generation, BasePromptValue, BaseMessage } from "../schema/index.js";
  import { BaseLLMOutputParser } from "../schema/output_parser.js";
  import { SerializedLLMChain } from "./serde.js";
  import { CallbackManager } from "../callbacks/index.js";
  import { BaseCallbackConfig, CallbackManagerForChainRun, Callbacks } from "../callbacks/manager.js";
+ import { Runnable } from "../schema/runnable/base.js";
+ type LLMType = BaseLanguageModel | Runnable<BaseLanguageModelInput, string> | Runnable<BaseLanguageModelInput, BaseMessage>;
+ type CallOptionsIfAvailable<T> = T extends {
+     CallOptions: infer CO;
+ } ? CO : any;
  /**
   * Interface for the input parameters of the LLMChain class.
   */
- export interface LLMChainInput<T extends string | object = string, L extends BaseLanguageModel = BaseLanguageModel> extends ChainInputs {
+ export interface LLMChainInput<T extends string | object = string, Model extends LLMType = LLMType> extends ChainInputs {
      /** Prompt object to use */
      prompt: BasePromptTemplate;
      /** LLM Wrapper to use */
-     llm: L;
+     llm: Model;
      /** Kwargs to pass to LLM */
-     llmKwargs?: this["llm"]["CallOptions"];
+     llmKwargs?: CallOptionsIfAvailable<Model>;
      /** OutputParser to use */
      outputParser?: BaseLLMOutputParser<T>;
      /** Key to use for output, defaults to `text` */
@@ -34,17 +39,18 @@ export interface LLMChainInput<T extends string | object = string, L extends Bas
   * const llm = new LLMChain({ llm: new OpenAI(), prompt });
   * ```
   */
- export declare class LLMChain<T extends string | object = string, L extends BaseLanguageModel = BaseLanguageModel> extends BaseChain implements LLMChainInput<T> {
+ export declare class LLMChain<T extends string | object = string, Model extends LLMType = LLMType> extends BaseChain implements LLMChainInput<T> {
      static lc_name(): string;
      lc_serializable: boolean;
      prompt: BasePromptTemplate;
-     llm: L;
-     llmKwargs?: this["llm"]["CallOptions"];
+     llm: Model;
+     llmKwargs?: CallOptionsIfAvailable<Model>;
      outputKey: string;
      outputParser?: BaseLLMOutputParser<T>;
      get inputKeys(): string[];
      get outputKeys(): string[];
-     constructor(fields: LLMChainInput<T, L>);
+     constructor(fields: LLMChainInput<T, Model>);
+     private getCallKeys;
      /** @ignore */
      _selectMemoryInputs(values: ChainValues): ChainValues;
      /** @ignore */
@@ -54,9 +60,9 @@ export declare class LLMChain<T extends string | object = string, L extends Bas
       *
       * Wraps _call and handles memory.
       */
-     call(values: ChainValues & this["llm"]["CallOptions"], config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;
+     call(values: ChainValues & CallOptionsIfAvailable<Model>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;
      /** @ignore */
-     _call(values: ChainValues & this["llm"]["CallOptions"], runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
+     _call(values: ChainValues & CallOptionsIfAvailable<Model>, runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
      /**
       * Format prompt with values and pass to LLM
       *
@@ -69,9 +75,11 @@ export declare class LLMChain<T extends string | object = string, L extends Bas
       * llm.predict({ adjective: "funny" })
       * ```
       */
-     predict(values: ChainValues & this["llm"]["CallOptions"], callbackManager?: CallbackManager): Promise<T>;
+     predict(values: ChainValues & CallOptionsIfAvailable<Model>, callbackManager?: CallbackManager): Promise<T>;
      _chainType(): "llm";
      static deserialize(data: SerializedLLMChain): Promise<LLMChain<string, BaseLanguageModel<any, import("../base_language/index.js").BaseLanguageModelCallOptions>>>;
      /** @deprecated */
      serialize(): SerializedLLMChain;
+     _getNumTokens(text: string): Promise<number>;
  }
+ export {};
package/dist/chains/llm_chain.js CHANGED
@@ -1,7 +1,30 @@
  import { BaseChain } from "./base.js";
  import { BasePromptTemplate } from "../prompts/base.js";
- import { BaseLanguageModel } from "../base_language/index.js";
+ import { BaseLanguageModel, } from "../base_language/index.js";
  import { NoOpOutputParser } from "../output_parsers/noop.js";
+ import { Runnable } from "../schema/runnable/base.js";
+ function isBaseLanguageModel(llmLike) {
+     return typeof llmLike._llmType === "function";
+ }
+ function _getLanguageModel(llmLike) {
+     if (isBaseLanguageModel(llmLike)) {
+         return llmLike;
+     }
+     else if ("bound" in llmLike && Runnable.isRunnable(llmLike.bound)) {
+         return _getLanguageModel(llmLike.bound);
+     }
+     else if ("runnable" in llmLike &&
+         "fallbacks" in llmLike &&
+         Runnable.isRunnable(llmLike.runnable)) {
+         return _getLanguageModel(llmLike.runnable);
+     }
+     else if ("default" in llmLike && Runnable.isRunnable(llmLike.default)) {
+         return _getLanguageModel(llmLike.default);
+     }
+     else {
+         throw new Error("Unable to extract BaseLanguageModel from llmLike object.");
+     }
+ }
  /**
   * Chain to run queries against LLMs.
   *
@@ -76,10 +99,15 @@ export class LLMChain extends BaseChain {
              this.outputParser = this.prompt.outputParser;
          }
      }
+     getCallKeys() {
+         const callKeys = "callKeys" in this.llm ? this.llm.callKeys : [];
+         return callKeys;
+     }
      /** @ignore */
      _selectMemoryInputs(values) {
          const valuesForMemory = super._selectMemoryInputs(values);
-         for (const key of this.llm.callKeys) {
+         const callKeys = this.getCallKeys();
+         for (const key of callKeys) {
              if (key in values) {
                  delete valuesForMemory[key];
              }
@@ -111,16 +139,29 @@ export class LLMChain extends BaseChain {
          const valuesForLLM = {
              ...this.llmKwargs,
          };
-         for (const key of this.llm.callKeys) {
+         const callKeys = this.getCallKeys();
+         for (const key of callKeys) {
              if (key in values) {
-                 valuesForLLM[key] = values[key];
-                 delete valuesForPrompt[key];
+                 if (valuesForLLM) {
+                     valuesForLLM[key] =
+                         values[key];
+                     delete valuesForPrompt[key];
+                 }
              }
          }
          const promptValue = await this.prompt.formatPromptValue(valuesForPrompt);
-         const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+         if ("generatePrompt" in this.llm) {
+             const { generations } = await this.llm.generatePrompt([promptValue], valuesForLLM, runManager?.getChild());
+             return {
+                 [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+             };
+         }
+         const modelWithParser = this.outputParser
+             ? this.llm.pipe(this.outputParser)
+             : this.llm;
+         const response = await modelWithParser.invoke(promptValue, runManager?.getChild());
          return {
-             [this.outputKey]: await this._getFinalOutput(generations[0], promptValue, runManager),
+             [this.outputKey]: response,
          };
      }
      /**
@@ -157,10 +198,14 @@ export class LLMChain extends BaseChain {
      }
      /** @deprecated */
      serialize() {
+         const serialize = "serialize" in this.llm ? this.llm.serialize() : undefined;
          return {
              _type: `${this._chainType()}_chain`,
-             llm: this.llm.serialize(),
+             llm: serialize,
              prompt: this.prompt.serialize(),
          };
      }
+     _getNumTokens(text) {
+         return _getLanguageModel(this.llm).getNumTokens(text);
+     }
  }
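
Taken together, the `llm_chain` changes let an `LLMChain` be built from a Runnable instead of a bare model. A minimal sketch of what this enables (the prompt and stop sequence are illustrative):

    import { LLMChain } from "langchain/chains";
    import { ChatOpenAI } from "langchain/chat_models/openai";
    import { PromptTemplate } from "langchain/prompts";

    const prompt = PromptTemplate.fromTemplate("Tell me a {adjective} joke");
    // .bind() returns a Runnable, not a BaseLanguageModel; before this
    // release the `llm` field would have rejected it.
    const model = new ChatOpenAI({ temperature: 0 }).bind({ stop: ["\n\n"] });
    const chain = new LLMChain({ llm: model, prompt });
    // A RunnableBinding has no `generatePrompt`, so _call takes the new
    // `.invoke()` path shown in the hunks above.
    const result = await chain.call({ adjective: "funny" });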
package/dist/chat_models/ollama.cjs CHANGED
@@ -213,6 +213,12 @@ class ChatOllama extends base_js_1.SimpleChatModel {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "format", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          this.model = fields.model ?? this.model;
          this.baseUrl = fields.baseUrl?.endsWith("/")
              ? fields.baseUrl.slice(0, -1)
@@ -247,6 +253,7 @@ class ChatOllama extends base_js_1.SimpleChatModel {
          this.useMLock = fields.useMLock;
          this.useMMap = fields.useMMap;
          this.vocabOnly = fields.vocabOnly;
+         this.format = fields.format;
      }
      _llmType() {
          return "ollama";
@@ -260,6 +267,7 @@ class ChatOllama extends base_js_1.SimpleChatModel {
      invocationParams(options) {
          return {
              model: this.model,
+             format: this.format,
              options: {
                  embedding_only: this.embeddingOnly,
                  f16_kv: this.f16KV,
package/dist/chat_models/ollama.d.ts CHANGED
@@ -3,6 +3,7 @@ import { BaseLanguageModelCallOptions } from "../base_language/index.js";
  import { OllamaInput } from "../util/ollama.js";
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { BaseMessage, ChatGenerationChunk } from "../schema/index.js";
+ import type { StringWithAutocomplete } from "../util/types.js";
  /**
   * An interface defining the options for an Ollama API call. It extends
   * the BaseLanguageModelCallOptions interface.
@@ -49,6 +50,7 @@ export declare class ChatOllama extends SimpleChatModel<OllamaCallOptions> imple
      useMLock?: boolean;
      useMMap?: boolean;
      vocabOnly?: boolean;
+     format?: StringWithAutocomplete<"json">;
      constructor(fields: OllamaInput & BaseChatModelParams);
      _llmType(): string;
      /**
@@ -59,6 +61,7 @@ export declare class ChatOllama extends SimpleChatModel<OllamaCallOptions> imple
       */
      invocationParams(options?: this["ParsedCallOptions"]): {
          model: string;
+         format: StringWithAutocomplete<"json"> | undefined;
          options: {
              embedding_only: boolean | undefined;
              f16_kv: boolean | undefined;
package/dist/chat_models/ollama.js CHANGED
@@ -210,6 +210,12 @@ export class ChatOllama extends SimpleChatModel {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "format", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          this.model = fields.model ?? this.model;
          this.baseUrl = fields.baseUrl?.endsWith("/")
              ? fields.baseUrl.slice(0, -1)
@@ -244,6 +250,7 @@ export class ChatOllama extends SimpleChatModel {
          this.useMLock = fields.useMLock;
          this.useMMap = fields.useMMap;
          this.vocabOnly = fields.vocabOnly;
+         this.format = fields.format;
      }
      _llmType() {
          return "ollama";
@@ -257,6 +264,7 @@ export class ChatOllama extends SimpleChatModel {
      invocationParams(options) {
          return {
              model: this.model,
+             format: this.format,
              options: {
                  embedding_only: this.embeddingOnly,
                  f16_kv: this.f16KV,
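
The new `format` field is forwarded verbatim in `invocationParams`, letting Ollama constrain its output. A minimal usage sketch (the model name assumes a locally pulled model):

    import { ChatOllama } from "langchain/chat_models/ollama";

    const model = new ChatOllama({
      baseUrl: "http://localhost:11434", // Ollama's default endpoint
      model: "llama2",                   // assumes `ollama pull llama2` has been run
      format: "json",                    // "json" is autocompleted; other strings pass through
    });
    const res = await model.invoke("List three prime numbers as a JSON array.");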
package/dist/chat_models/openai.cjs CHANGED
@@ -421,6 +421,9 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
                  continue;
              }
              const { delta } = choice;
+             if (!delta) {
+                 continue;
+             }
              const chunk = _convertDeltaToMessageChunk(delta, defaultRole);
              defaultRole = delta.role ?? defaultRole;
              const newTokenIndices = {
package/dist/chat_models/openai.js CHANGED
@@ -418,6 +418,9 @@ export class ChatOpenAI extends BaseChatModel {
                  continue;
              }
              const { delta } = choice;
+             if (!delta) {
+                 continue;
+             }
              const chunk = _convertDeltaToMessageChunk(delta, defaultRole);
              defaultRole = delta.role ?? defaultRole;
              const newTokenIndices = {
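
Note: the added `delta` guard makes the streaming loop tolerant of chunks whose choice carries no `delta` payload, which would previously have thrown inside `_convertDeltaToMessageChunk`.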
package/dist/document_loaders/fs/unstructured.d.ts CHANGED
@@ -5,6 +5,7 @@ import type { readFile as ReadFileT } from "node:fs/promises";
  import { DirectoryLoader, UnknownHandling } from "./directory.js";
  import { Document } from "../../document.js";
  import { BaseDocumentLoader } from "../base.js";
+ import type { StringWithAutocomplete } from "../../util/types.js";
  /**
   * Represents an element returned by the Unstructured API. It has
   * properties for the element type, text content, and metadata.
@@ -40,11 +41,6 @@ export type SkipInferTableTypes = "txt" | "text" | "pdf" | "docx" | "doc" | "jpg
   * Set the chunking_strategy to chunk text into larger or smaller elements. Defaults to None with optional arg of by_title
   */
  type ChunkingStrategy = "None" | "by_title";
- /**
-  * Represents a string value with autocomplete suggestions. It is used for
-  * the `strategy` property in the UnstructuredLoaderOptions.
-  */
- type StringWithAutocomplete<T> = T | (string & Record<never, never>);
  export type UnstructuredLoaderOptions = {
      apiKey?: string;
      apiUrl?: string;
package/dist/embeddings/ollama.d.ts CHANGED
@@ -1,6 +1,6 @@
  import { OllamaInput, OllamaRequestParams } from "../util/ollama.js";
  import { Embeddings, EmbeddingsParams } from "./base.js";
- type CamelCasedRequestOptions = Omit<OllamaInput, "baseUrl" | "model">;
+ type CamelCasedRequestOptions = Omit<OllamaInput, "baseUrl" | "model" | "format">;
  /**
   * Interface for OllamaEmbeddings parameters. Extends EmbeddingsParams and
   * defines additional parameters specific to the OllamaEmbeddings class.
package/dist/llms/ollama.cjs CHANGED
@@ -212,6 +212,12 @@ class Ollama extends base_js_1.LLM {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "format", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          this.model = fields.model ?? this.model;
          this.baseUrl = fields.baseUrl?.endsWith("/")
              ? fields.baseUrl.slice(0, -1)
@@ -246,6 +252,7 @@ class Ollama extends base_js_1.LLM {
          this.useMLock = fields.useMLock;
          this.useMMap = fields.useMMap;
          this.vocabOnly = fields.vocabOnly;
+         this.format = fields.format;
      }
      _llmType() {
          return "ollama";
@@ -253,6 +260,7 @@ class Ollama extends base_js_1.LLM {
      invocationParams(options) {
          return {
              model: this.model,
+             format: this.format,
              options: {
                  embedding_only: this.embeddingOnly,
                  f16_kv: this.f16KV,
package/dist/llms/ollama.d.ts CHANGED
@@ -2,6 +2,7 @@ import { LLM, BaseLLMParams } from "./base.js";
  import { OllamaInput, OllamaCallOptions } from "../util/ollama.js";
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { GenerationChunk } from "../schema/index.js";
+ import type { StringWithAutocomplete } from "../util/types.js";
  /**
   * Class that represents the Ollama language model. It extends the base
   * LLM class and implements the OllamaInput interface.
@@ -41,10 +42,12 @@ export declare class Ollama extends LLM<OllamaCallOptions> implements OllamaInpu
      useMLock?: boolean;
      useMMap?: boolean;
      vocabOnly?: boolean;
+     format?: StringWithAutocomplete<"json">;
      constructor(fields: OllamaInput & BaseLLMParams);
      _llmType(): string;
      invocationParams(options?: this["ParsedCallOptions"]): {
          model: string;
+         format: StringWithAutocomplete<"json"> | undefined;
          options: {
              embedding_only: boolean | undefined;
              f16_kv: boolean | undefined;
package/dist/llms/ollama.js CHANGED
@@ -209,6 +209,12 @@ export class Ollama extends LLM {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "format", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          this.model = fields.model ?? this.model;
          this.baseUrl = fields.baseUrl?.endsWith("/")
              ? fields.baseUrl.slice(0, -1)
@@ -243,6 +249,7 @@ export class Ollama extends LLM {
          this.useMLock = fields.useMLock;
          this.useMMap = fields.useMMap;
          this.vocabOnly = fields.vocabOnly;
+         this.format = fields.format;
      }
      _llmType() {
          return "ollama";
@@ -250,6 +257,7 @@ export class Ollama extends LLM {
      invocationParams(options) {
          return {
              model: this.model,
+             format: this.format,
              options: {
                  embedding_only: this.embeddingOnly,
                  f16_kv: this.f16KV,
package/dist/llms/openai.cjs CHANGED
@@ -121,7 +121,7 @@ class OpenAI extends base_js_1.BaseLLM {
              enumerable: true,
              configurable: true,
              writable: true,
-             value: "text-davinci-003"
+             value: "gpt-3.5-turbo-instruct"
          });
          Object.defineProperty(this, "modelKwargs", {
              enumerable: true,
package/dist/llms/openai.js CHANGED
@@ -118,7 +118,7 @@ export class OpenAI extends BaseLLM {
              enumerable: true,
              configurable: true,
              writable: true,
-             value: "text-davinci-003"
+             value: "gpt-3.5-turbo-instruct"
          });
          Object.defineProperty(this, "modelKwargs", {
              enumerable: true,
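
The default completions model changes from `text-davinci-003` (since deprecated by OpenAI) to `gpt-3.5-turbo-instruct`. Code that depended on the old default can pin it explicitly:

    import { OpenAI } from "langchain/llms/openai";

    // Opt back into the previous default model explicitly.
    const llm = new OpenAI({ modelName: "text-davinci-003" });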
package/dist/output_parsers/index.cjs CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.JsonKeyOutputFunctionsParser = exports.JsonOutputFunctionsParser = exports.OutputFunctionsParser = exports.CustomListOutputParser = exports.RouterOutputParser = exports.CombiningOutputParser = exports.OutputFixingParser = exports.JsonMarkdownStructuredOutputParser = exports.AsymmetricStructuredOutputParser = exports.StructuredOutputParser = exports.RegexParser = exports.CommaSeparatedListOutputParser = exports.ListOutputParser = void 0;
+ exports.JsonOutputToolsParser = exports.JsonKeyOutputFunctionsParser = exports.JsonOutputFunctionsParser = exports.OutputFunctionsParser = exports.CustomListOutputParser = exports.RouterOutputParser = exports.CombiningOutputParser = exports.OutputFixingParser = exports.JsonMarkdownStructuredOutputParser = exports.AsymmetricStructuredOutputParser = exports.StructuredOutputParser = exports.RegexParser = exports.CommaSeparatedListOutputParser = exports.ListOutputParser = void 0;
  var list_js_1 = require("./list.cjs");
  Object.defineProperty(exports, "ListOutputParser", { enumerable: true, get: function () { return list_js_1.ListOutputParser; } });
  Object.defineProperty(exports, "CommaSeparatedListOutputParser", { enumerable: true, get: function () { return list_js_1.CommaSeparatedListOutputParser; } });
@@ -22,3 +22,5 @@ var openai_functions_js_1 = require("../output_parsers/openai_functions.cjs");
  Object.defineProperty(exports, "OutputFunctionsParser", { enumerable: true, get: function () { return openai_functions_js_1.OutputFunctionsParser; } });
  Object.defineProperty(exports, "JsonOutputFunctionsParser", { enumerable: true, get: function () { return openai_functions_js_1.JsonOutputFunctionsParser; } });
  Object.defineProperty(exports, "JsonKeyOutputFunctionsParser", { enumerable: true, get: function () { return openai_functions_js_1.JsonKeyOutputFunctionsParser; } });
+ var openai_tools_js_1 = require("../output_parsers/openai_tools.cjs");
+ Object.defineProperty(exports, "JsonOutputToolsParser", { enumerable: true, get: function () { return openai_tools_js_1.JsonOutputToolsParser; } });
package/dist/output_parsers/index.d.ts CHANGED
@@ -6,3 +6,4 @@ export { CombiningOutputParser } from "./combining.js";
  export { RouterOutputParser, type RouterOutputParserInput } from "./router.js";
  export { CustomListOutputParser } from "./list.js";
  export { type FunctionParameters, OutputFunctionsParser, JsonOutputFunctionsParser, JsonKeyOutputFunctionsParser, } from "../output_parsers/openai_functions.js";
+ export { type ParsedToolCall, JsonOutputToolsParser, } from "../output_parsers/openai_tools.js";
package/dist/output_parsers/index.js CHANGED
@@ -6,3 +6,4 @@ export { CombiningOutputParser } from "./combining.js";
  export { RouterOutputParser } from "./router.js";
  export { CustomListOutputParser } from "./list.js";
  export { OutputFunctionsParser, JsonOutputFunctionsParser, JsonKeyOutputFunctionsParser, } from "../output_parsers/openai_functions.js";
+ export { JsonOutputToolsParser, } from "../output_parsers/openai_tools.js";
package/dist/output_parsers/openai_functions.cjs CHANGED
@@ -18,7 +18,7 @@ class OutputFunctionsParser extends output_parser_js_1.BaseLLMOutputParser {
              enumerable: true,
              configurable: true,
              writable: true,
-             value: ["langchain", "chains", "openai_functions"]
+             value: ["langchain", "output_parsers"]
          });
          Object.defineProperty(this, "lc_serializable", {
              enumerable: true,
@@ -75,7 +75,7 @@ class JsonOutputFunctionsParser extends output_parser_js_1.BaseCumulativeTransfo
              enumerable: true,
              configurable: true,
              writable: true,
-             value: ["langchain", "chains", "openai_functions"]
+             value: ["langchain", "output_parsers"]
          });
          Object.defineProperty(this, "lc_serializable", {
              enumerable: true,
@@ -166,7 +166,7 @@ class JsonKeyOutputFunctionsParser extends output_parser_js_1.BaseLLMOutputParse
              enumerable: true,
              configurable: true,
              writable: true,
-             value: ["langchain", "chains", "openai_functions"]
+             value: ["langchain", "output_parsers"]
          });
          Object.defineProperty(this, "lc_serializable", {
              enumerable: true,
package/dist/output_parsers/openai_functions.js CHANGED
@@ -15,7 +15,7 @@ export class OutputFunctionsParser extends BaseLLMOutputParser {
              enumerable: true,
              configurable: true,
              writable: true,
-             value: ["langchain", "chains", "openai_functions"]
+             value: ["langchain", "output_parsers"]
          });
          Object.defineProperty(this, "lc_serializable", {
              enumerable: true,
@@ -71,7 +71,7 @@ export class JsonOutputFunctionsParser extends BaseCumulativeTransformOutputPars
              enumerable: true,
              configurable: true,
              writable: true,
-             value: ["langchain", "chains", "openai_functions"]
+             value: ["langchain", "output_parsers"]
          });
          Object.defineProperty(this, "lc_serializable", {
              enumerable: true,
@@ -161,7 +161,7 @@ export class JsonKeyOutputFunctionsParser extends BaseLLMOutputParser {
              enumerable: true,
              configurable: true,
              writable: true,
-             value: ["langchain", "chains", "openai_functions"]
+             value: ["langchain", "output_parsers"]
          });
          Object.defineProperty(this, "lc_serializable", {
              enumerable: true,
package/dist/output_parsers/openai_tools.cjs ADDED
@@ -0,0 +1,53 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.JsonOutputToolsParser = void 0;
+ const output_parser_js_1 = require("../schema/output_parser.cjs");
+ /**
+  * Class for parsing the output of an LLM into a JSON object. Uses an
+  * instance of `OutputToolsParser` to parse the output.
+  */
+ class JsonOutputToolsParser extends output_parser_js_1.BaseLLMOutputParser {
+     constructor() {
+         super(...arguments);
+         Object.defineProperty(this, "lc_namespace", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: ["langchain", "output_parsers"]
+         });
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: true
+         });
+     }
+     static lc_name() {
+         return "JsonOutputToolsParser";
+     }
+     /**
+      * Parses the output and returns a JSON object. If `argsOnly` is true,
+      * only the arguments of the function call are returned.
+      * @param generations The output of the LLM to parse.
+      * @returns A JSON object representation of the function call or its arguments.
+      */
+     async parseResult(generations) {
+         const toolCalls = generations[0].message.additional_kwargs.tool_calls;
+         if (!toolCalls) {
+             throw new Error(`No tools_call in message ${JSON.stringify(generations)}`);
+         }
+         const clonedToolCalls = JSON.parse(JSON.stringify(toolCalls));
+         const parsedToolCalls = [];
+         for (const toolCall of clonedToolCalls) {
+             if (toolCall.function !== undefined) {
+                 const functionArgs = toolCall.function.arguments;
+                 parsedToolCalls.push({
+                     name: toolCall.function.name,
+                     arguments: JSON.parse(functionArgs),
+                 });
+             }
+         }
+         return parsedToolCalls;
+     }
+ }
+ exports.JsonOutputToolsParser = JsonOutputToolsParser;
package/dist/output_parsers/openai_tools.d.ts ADDED
@@ -0,0 +1,22 @@
+ import { BaseLLMOutputParser } from "../schema/output_parser.js";
+ import type { ChatGeneration } from "../schema/index.js";
+ export type ParsedToolCall = {
+     name: string;
+     arguments: Record<string, any>;
+ };
+ /**
+  * Class for parsing the output of an LLM into a JSON object. Uses an
+  * instance of `OutputToolsParser` to parse the output.
+  */
+ export declare class JsonOutputToolsParser extends BaseLLMOutputParser<ParsedToolCall[]> {
+     static lc_name(): string;
+     lc_namespace: string[];
+     lc_serializable: boolean;
+     /**
+      * Parses the output and returns a JSON object. If `argsOnly` is true,
+      * only the arguments of the function call are returned.
+      * @param generations The output of the LLM to parse.
+      * @returns A JSON object representation of the function call or its arguments.
+      */
+     parseResult(generations: ChatGeneration[]): Promise<ParsedToolCall[]>;
+ }
package/dist/output_parsers/openai_tools.js ADDED
@@ -0,0 +1,49 @@
+ import { BaseLLMOutputParser } from "../schema/output_parser.js";
+ /**
+  * Class for parsing the output of an LLM into a JSON object. Uses an
+  * instance of `OutputToolsParser` to parse the output.
+  */
+ export class JsonOutputToolsParser extends BaseLLMOutputParser {
+     constructor() {
+         super(...arguments);
+         Object.defineProperty(this, "lc_namespace", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: ["langchain", "output_parsers"]
+         });
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: true
+         });
+     }
+     static lc_name() {
+         return "JsonOutputToolsParser";
+     }
+     /**
+      * Parses the output and returns a JSON object. If `argsOnly` is true,
+      * only the arguments of the function call are returned.
+      * @param generations The output of the LLM to parse.
+      * @returns A JSON object representation of the function call or its arguments.
+      */
+     async parseResult(generations) {
+         const toolCalls = generations[0].message.additional_kwargs.tool_calls;
+         if (!toolCalls) {
+             throw new Error(`No tools_call in message ${JSON.stringify(generations)}`);
+         }
+         const clonedToolCalls = JSON.parse(JSON.stringify(toolCalls));
+         const parsedToolCalls = [];
+         for (const toolCall of clonedToolCalls) {
+             if (toolCall.function !== undefined) {
+                 const functionArgs = toolCall.function.arguments;
+                 parsedToolCalls.push({
+                     name: toolCall.function.name,
+                     arguments: JSON.parse(functionArgs),
+                 });
+             }
+         }
+         return parsedToolCalls;
+     }
+ }
package/dist/prompts/base.d.ts CHANGED
@@ -5,7 +5,8 @@ import { SerializedBasePromptTemplate } from "./serde.js";
  import { SerializedFields } from "../load/map_keys.js";
  import { Runnable } from "../schema/runnable/index.js";
  import { BaseCallbackConfig } from "../callbacks/manager.js";
- export type TypedPromptInputValues<RunInput> = InputValues<Extract<keyof RunInput, string> | (string & Record<never, never>)>;
+ import type { StringWithAutocomplete } from "../util/types.js";
+ export type TypedPromptInputValues<RunInput> = InputValues<StringWithAutocomplete<Extract<keyof RunInput, string>>>;
  /**
   * Represents a prompt value as a string. It extends the BasePromptValue
   * class and overrides the toString and toChatMessages methods.
package/dist/prompts/chat.cjs CHANGED
@@ -53,7 +53,7 @@ class ChatPromptValue extends index_js_1.BasePromptValue {
              // eslint-disable-next-line no-param-reassign
              fields = { messages: fields };
          }
-         super(...arguments);
+         super(fields);
          Object.defineProperty(this, "lc_namespace", {
              enumerable: true,
              configurable: true,
package/dist/prompts/chat.js CHANGED
@@ -49,7 +49,7 @@ export class ChatPromptValue extends BasePromptValue {
              // eslint-disable-next-line no-param-reassign
              fields = { messages: fields };
          }
-         super(...arguments);
+         super(fields);
          Object.defineProperty(this, "lc_namespace", {
              enumerable: true,
              configurable: true,
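
Note: the `super(...arguments)` → `super(fields)` change in both builds fixes a real bug: when a raw message array was normalized into `fields = { messages: fields }` just above, `...arguments` still forwarded the original array, silently discarding the normalization.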
package/dist/schema/index.d.ts CHANGED
@@ -1,6 +1,7 @@
  import type { OpenAI as OpenAIClient } from "openai";
  import { Document } from "../document.js";
  import { Serializable } from "../load/serializable.js";
+ import type { StringWithAutocomplete } from "../util/types.js";
  export declare const RUN_KEY = "__run";
  export type Example = Record<string, string>;
  export type InputValues<K extends string = string> = Record<K, any>;
@@ -252,10 +253,7 @@ export declare class ChatMessage extends BaseMessage implements ChatMessageField
      _getType(): MessageType;
      static isInstance(message: BaseMessage): message is ChatMessage;
  }
- export type BaseMessageLike = BaseMessage | [
-     MessageType | "user" | "assistant" | (string & Record<never, never>),
-     string
- ] | string;
+ export type BaseMessageLike = BaseMessage | [StringWithAutocomplete<MessageType | "user" | "assistant">, string] | string;
  export declare function isBaseMessage(messageLike?: unknown): messageLike is BaseMessage;
  export declare function isBaseMessageChunk(messageLike?: unknown): messageLike is BaseMessageChunk;
  export declare function coerceMessageLikeToMessage(messageLike: BaseMessageLike): BaseMessage;
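
The reworked `BaseMessageLike` is behavior-preserving: the tuple's role slot still autocompletes the known roles while accepting any string. A small sketch of the accepted shapes (the coercion targets noted in comments follow the existing `coerceMessageLikeToMessage` behavior):

    import { coerceMessageLikeToMessage } from "langchain/schema";

    const a = coerceMessageLikeToMessage("hello");                   // bare string → human message
    const b = coerceMessageLikeToMessage(["assistant", "hi there"]); // known role tuple → AI message
    const c = coerceMessageLikeToMessage(["reviewer", "LGTM"]);      // arbitrary role → generic ChatMessage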
package/dist/schema/runnable/base.d.ts CHANGED
@@ -317,8 +317,8 @@ export declare class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable
      static lc_name(): string;
      lc_namespace: string[];
      lc_serializable: boolean;
-     protected runnable: Runnable<RunInput, RunOutput>;
-     protected fallbacks: Runnable<RunInput, RunOutput>[];
+     runnable: Runnable<RunInput, RunOutput>;
+     fallbacks: Runnable<RunInput, RunOutput>[];
      constructor(fields: {
          runnable: Runnable<RunInput, RunOutput>;
          fallbacks: Runnable<RunInput, RunOutput>[];
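
Note: `runnable` and `fallbacks` lose their `protected` modifier so that external helpers — notably `_getLanguageModel` in `llm_chain`, which probes for `"runnable" in llmLike && "fallbacks" in llmLike` — can traverse a `RunnableWithFallbacks` from outside the class.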
package/dist/util/ollama.d.ts CHANGED
@@ -1,4 +1,5 @@
  import { BaseLanguageModelCallOptions } from "../base_language/index.js";
+ import type { StringWithAutocomplete } from "./types.js";
  export interface OllamaInput {
      embeddingOnly?: boolean;
      f16KV?: boolean;
@@ -32,10 +33,12 @@ export interface OllamaInput {
      useMLock?: boolean;
      useMMap?: boolean;
      vocabOnly?: boolean;
+     format?: StringWithAutocomplete<"json">;
  }
  export interface OllamaRequestParams {
      model: string;
      prompt: string;
+     format?: StringWithAutocomplete<"json">;
      options: {
          embedding_only?: boolean;
          f16_kv?: boolean;
package/dist/util/types.cjs ADDED
@@ -0,0 +1,5 @@
+ "use strict";
+ /**
+  * Represents a string value with autocompleted, but not required, suggestions.
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
package/dist/util/types.d.ts ADDED
@@ -0,0 +1,4 @@
+ /**
+  * Represents a string value with autocompleted, but not required, suggestions.
+  */
+ export type StringWithAutocomplete<T> = T | (string & Record<never, never>);
package/dist/util/types.js ADDED
@@ -0,0 +1,4 @@
+ /**
+  * Represents a string value with autocompleted, but not required, suggestions.
+  */
+ export {};
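
The three `util/types` builds centralize the `StringWithAutocomplete` helper (a type-only module, so the compiled JS is empty) that the Ollama, unstructured-loader, prompt, and schema changes above now share. Its effect, inlined for illustration:

    // Inlined copy of the type for illustration.
    type StringWithAutocomplete<T> = T | (string & Record<never, never>);

    let format: StringWithAutocomplete<"json">;
    format = "json";     // offered by editor autocomplete
    format = "verbose";  // any other string still type-checks

The `string & Record<never, never>` intersection keeps TypeScript from collapsing the union to plain `string`, which is what preserves the literal suggestions in editors.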
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "langchain",
-   "version": "0.0.185",
+   "version": "0.0.186",
    "description": "Typescript bindings for langchain",
    "type": "module",
    "engines": {