langchain 0.3.34 → 0.3.36

@@ -4,7 +4,7 @@ import type { BaseLanguageModelInterface } from "@langchain/core/language_models
 import { AgentStep } from "@langchain/core/agents";
 import { AgentRunnableSequence } from "../agent.js";
 /**
- * Params used by the createXmlAgent function.
+ * Params used by the createReactAgent function.
  */
 export type CreateReactAgentParams = {
     /** LLM to use for the agent. */
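The corrected comment now names the function that actually consumes this params type. A minimal sketch of that function in use, assuming @langchain/openai is installed and OPENAI_API_KEY is set; the hub handle is the commonly published ReAct prompt and the tool is a stand-in:

```ts
import { ChatOpenAI } from "@langchain/openai";
import { DynamicTool } from "@langchain/core/tools";
import { AgentExecutor, createReactAgent } from "langchain/agents";
import { pull } from "langchain/hub";
import type { PromptTemplate } from "@langchain/core/prompts";

// The object literal passed to createReactAgent below is a CreateReactAgentParams.
const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
const tools = [
  new DynamicTool({
    name: "echo",
    description: "Echoes its input back.",
    func: async (input) => input,
  }),
];
const prompt = await pull<PromptTemplate>("hwchase17/react");
const agent = await createReactAgent({ llm, tools, prompt });
const executor = new AgentExecutor({ agent, tools });
await executor.invoke({ input: "Echo the word hello." });
```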
@@ -47,7 +47,7 @@ class LocalFileCache extends caches_1.BaseCache {
      * @returns An array of Generations if found, null otherwise.
      */
     async lookup(prompt, llmKey) {
-        const key = `${(0, caches_1.getCacheKey)(prompt, llmKey)}.json`;
+        const key = `${this.keyEncoder(prompt, llmKey)}.json`;
         try {
             const content = await promises_1.default.readFile(node_path_1.default.join(this.cacheDir, key));
             return JSON.parse(content.toString()).map(caches_1.deserializeStoredGeneration);
@@ -65,7 +65,7 @@ class LocalFileCache extends caches_1.BaseCache {
      * @param generations The value to be stored in the cache.
      */
     async update(prompt, llmKey, generations) {
-        const key = `${(0, caches_1.getCacheKey)(prompt, llmKey)}.json`;
+        const key = `${this.keyEncoder(prompt, llmKey)}.json`;
         await promises_1.default.writeFile(node_path_1.default.join(this.cacheDir, key), JSON.stringify(generations.map(caches_1.serializeGeneration)));
     }
 }
@@ -1,6 +1,6 @@
 import path from "node:path";
 import fs from "node:fs/promises";
-import { BaseCache, getCacheKey, serializeGeneration, deserializeStoredGeneration, } from "@langchain/core/caches";
+import { BaseCache, serializeGeneration, deserializeStoredGeneration, } from "@langchain/core/caches";
 /**
  * A cache that uses the local filesystem as the backing store.
  * This is useful for local development and testing. But it is not recommended for production use.
@@ -41,7 +41,7 @@ export class LocalFileCache extends BaseCache {
      * @returns An array of Generations if found, null otherwise.
      */
     async lookup(prompt, llmKey) {
-        const key = `${getCacheKey(prompt, llmKey)}.json`;
+        const key = `${this.keyEncoder(prompt, llmKey)}.json`;
         try {
             const content = await fs.readFile(path.join(this.cacheDir, key));
             return JSON.parse(content.toString()).map(deserializeStoredGeneration);
@@ -59,7 +59,7 @@ export class LocalFileCache extends BaseCache {
      * @param generations The value to be stored in the cache.
      */
     async update(prompt, llmKey, generations) {
-        const key = `${getCacheKey(prompt, llmKey)}.json`;
+        const key = `${this.keyEncoder(prompt, llmKey)}.json`;
         await fs.writeFile(path.join(this.cacheDir, key), JSON.stringify(generations.map(serializeGeneration)));
     }
 }
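Both builds of LocalFileCache now derive cache file names from `this.keyEncoder` (inherited from BaseCache in @langchain/core) rather than the standalone `getCacheKey` helper, so the key-derivation function can be swapped out by the base class. A hedged sketch of the cache in use, assuming OPENAI_API_KEY is set; the cache directory is an example path:

```ts
import { LocalFileCache } from "langchain/cache/file_system";
import { ChatOpenAI } from "@langchain/openai";

// create() ensures the cache directory exists before first use.
const cache = await LocalFileCache.create("./.langchain-cache");
const model = new ChatOpenAI({ model: "gpt-4o-mini", cache });

// The first call hits the provider and writes `${keyEncoder(prompt, llmKey)}.json`;
// the identical second call is served from disk.
await model.invoke("What is 2 + 2?");
await model.invoke("What is 2 + 2?");
```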
@@ -56,6 +56,7 @@ const _SUPPORTED_PROVIDERS = [
     "cerebras",
     "deepseek",
     "xai",
+    "perplexity",
 ];
 async function _initChatModelHelper(model, modelProvider,
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -148,6 +149,18 @@ params = {}) {
             "@langchain/community/chat_models/togetherai")));
             return new ChatTogetherAI({ model, ...passedParams });
         }
+        case "perplexity": {
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            // @ts-ignore - Can not install as a proper dependency due to circular dependency
+            const { ChatPerplexity } = await Promise.resolve().then(() => __importStar(require(
+            // We can not 'expect-error' because if you explicitly build `@langchain/community`
+            // this import will be able to be resolved, thus there will be no error. However
+            // this will never be the case in CI.
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            // @ts-ignore - Can not install as a proper dependency due to circular dependency
+            "@langchain/community/chat_models/perplexity")));
+            return new ChatPerplexity({ model, ...passedParams });
+        }
         default: {
             const supported = _SUPPORTED_PROVIDERS.join(", ");
             throw new Error(`Unsupported { modelProvider: ${modelProviderCopy} }.\n\nSupported model providers are: ${supported}`);
@@ -203,6 +216,9 @@ function _inferModelProvider(modelName) {
     else if (modelName.startsWith("mistral")) {
         return "mistralai";
     }
+    else if (modelName.startsWith("sonar") || modelName.startsWith("pplx")) {
+        return "perplexity";
+    }
     else {
         return undefined;
     }
@@ -456,6 +472,7 @@ exports.ConfigurableModel = ConfigurableModel;
  * - mistralai (@langchain/mistralai)
  * - groq (@langchain/groq)
  * - ollama (@langchain/ollama)
+ * - perplexity (@langchain/community/chat_models/perplexity)
  * - cerebras (@langchain/cerebras)
  * - deepseek (@langchain/deepseek)
  * - xai (@langchain/xai)
@@ -9,7 +9,7 @@ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 import { ChatResult } from "@langchain/core/outputs";
 interface EventStreamCallbackHandlerInput extends Omit<LogStreamCallbackHandlerInput, "_schemaFormat"> {
 }
-declare const _SUPPORTED_PROVIDERS: readonly ["openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-vertexai-web", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock", "cerebras", "deepseek", "xai"];
+declare const _SUPPORTED_PROVIDERS: readonly ["openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-vertexai-web", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock", "cerebras", "deepseek", "xai", "perplexity"];
 export type ChatModelProvider = (typeof _SUPPORTED_PROVIDERS)[number];
 export interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {
     tools?: (StructuredToolInterface | Record<string, unknown> | ToolDefinition | RunnableToolLike)[];
@@ -18,6 +18,7 @@ const _SUPPORTED_PROVIDERS = [
     "cerebras",
     "deepseek",
     "xai",
+    "perplexity",
 ];
 async function _initChatModelHelper(model, modelProvider,
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -110,6 +111,18 @@ params = {}) {
             "@langchain/community/chat_models/togetherai");
             return new ChatTogetherAI({ model, ...passedParams });
         }
+        case "perplexity": {
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            // @ts-ignore - Can not install as a proper dependency due to circular dependency
+            const { ChatPerplexity } = await import(
+            // We can not 'expect-error' because if you explicitly build `@langchain/community`
+            // this import will be able to be resolved, thus there will be no error. However
+            // this will never be the case in CI.
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            // @ts-ignore - Can not install as a proper dependency due to circular dependency
+            "@langchain/community/chat_models/perplexity");
+            return new ChatPerplexity({ model, ...passedParams });
+        }
         default: {
             const supported = _SUPPORTED_PROVIDERS.join(", ");
             throw new Error(`Unsupported { modelProvider: ${modelProviderCopy} }.\n\nSupported model providers are: ${supported}`);
@@ -165,6 +178,9 @@ export function _inferModelProvider(modelName) {
     else if (modelName.startsWith("mistral")) {
         return "mistralai";
     }
+    else if (modelName.startsWith("sonar") || modelName.startsWith("pplx")) {
+        return "perplexity";
+    }
     else {
         return undefined;
     }
@@ -417,6 +433,7 @@ export class ConfigurableModel extends BaseChatModel {
  * - mistralai (@langchain/mistralai)
  * - groq (@langchain/groq)
  * - ollama (@langchain/ollama)
+ * - perplexity (@langchain/community/chat_models/perplexity)
  * - cerebras (@langchain/cerebras)
  * - deepseek (@langchain/deepseek)
  * - xai (@langchain/xai)
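Taken together, the hunks above register Perplexity as a provider for initChatModel in both the CJS and ESM builds: it is appended to _SUPPORTED_PROVIDERS, lazily imported from @langchain/community, and inferred from the "sonar"/"pplx" model-name prefixes. A minimal sketch, assuming @langchain/community is installed and PERPLEXITY_API_KEY is set; the model names are illustrative:

```ts
import { initChatModel } from "langchain/chat_models/universal";

// Explicit provider selection.
const explicit = await initChatModel("sonar-pro", { modelProvider: "perplexity" });

// Provider inferred from the "sonar" prefix by _inferModelProvider.
const inferred = await initChatModel("sonar");

await explicit.invoke("Say hello.");
```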
package/dist/hub/base.cjs CHANGED
@@ -94,6 +94,9 @@ modelClass) {
     else if (modelLcName === "ChatBedrockConverse") {
         importMapKey = "chat_models__chat_bedrock_converse";
     }
+    else if (modelLcName === "ChatMistralAI") {
+        importMapKey = "chat_models__mistralai";
+    }
     else if (modelLcName === "ChatMistral") {
         importMapKey = "chat_models__mistralai";
     }
@@ -146,14 +149,21 @@ function bindOutputSchema(loadedSequence) {
         "schema" in loadedSequence.first &&
         "last" in loadedSequence &&
         loadedSequence.last !== null &&
-        typeof loadedSequence.last === "object" &&
-        "bound" in loadedSequence.last &&
-        loadedSequence.last.bound !== null &&
-        typeof loadedSequence.last.bound === "object" &&
-        "withStructuredOutput" in loadedSequence.last.bound &&
-        typeof loadedSequence.last.bound.withStructuredOutput === "function") {
-        // eslint-disable-next-line no-param-reassign
-        loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(loadedSequence.first.schema);
+        typeof loadedSequence.last === "object") {
+        if ("bound" in loadedSequence.last &&
+            loadedSequence.last.bound !== null &&
+            typeof loadedSequence.last.bound === "object" &&
+            "withStructuredOutput" in loadedSequence.last.bound &&
+            typeof loadedSequence.last.bound.withStructuredOutput === "function") {
+            // eslint-disable-next-line no-param-reassign
+            loadedSequence.last.bound =
+                loadedSequence.last.bound.withStructuredOutput(loadedSequence.first.schema);
+        }
+        else if ("withStructuredOutput" in loadedSequence.last &&
+            typeof loadedSequence.last.withStructuredOutput === "function") {
+            // eslint-disable-next-line no-param-reassign
+            loadedSequence.last = loadedSequence.last.withStructuredOutput(loadedSequence.first.schema);
+        }
     }
     return loadedSequence;
 }
package/dist/hub/base.js CHANGED
@@ -87,6 +87,9 @@ modelClass) {
     else if (modelLcName === "ChatBedrockConverse") {
         importMapKey = "chat_models__chat_bedrock_converse";
     }
+    else if (modelLcName === "ChatMistralAI") {
+        importMapKey = "chat_models__mistralai";
+    }
     else if (modelLcName === "ChatMistral") {
         importMapKey = "chat_models__mistralai";
     }
@@ -139,14 +142,21 @@ export function bindOutputSchema(loadedSequence) {
         "schema" in loadedSequence.first &&
         "last" in loadedSequence &&
         loadedSequence.last !== null &&
-        typeof loadedSequence.last === "object" &&
-        "bound" in loadedSequence.last &&
-        loadedSequence.last.bound !== null &&
-        typeof loadedSequence.last.bound === "object" &&
-        "withStructuredOutput" in loadedSequence.last.bound &&
-        typeof loadedSequence.last.bound.withStructuredOutput === "function") {
-        // eslint-disable-next-line no-param-reassign
-        loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(loadedSequence.first.schema);
+        typeof loadedSequence.last === "object") {
+        if ("bound" in loadedSequence.last &&
+            loadedSequence.last.bound !== null &&
+            typeof loadedSequence.last.bound === "object" &&
+            "withStructuredOutput" in loadedSequence.last.bound &&
+            typeof loadedSequence.last.bound.withStructuredOutput === "function") {
+            // eslint-disable-next-line no-param-reassign
+            loadedSequence.last.bound =
+                loadedSequence.last.bound.withStructuredOutput(loadedSequence.first.schema);
+        }
+        else if ("withStructuredOutput" in loadedSequence.last &&
+            typeof loadedSequence.last.withStructuredOutput === "function") {
+            // eslint-disable-next-line no-param-reassign
+            loadedSequence.last = loadedSequence.last.withStructuredOutput(loadedSequence.first.schema);
+        }
     }
     return loadedSequence;
 }
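The reshaped guard widens bindOutputSchema: previously the schema was bound only when the sequence ended in a binding wrapper exposing `.bound` (for example, a model with tools attached); now a bare chat model at the end of the sequence, which itself implements withStructuredOutput, is handled too. A hedged sketch of the code path being exercised; the repo handle is illustrative:

```ts
import { pull } from "langchain/hub/node";

// A hub prompt saved with a structured-output schema and an attached model.
// With 0.3.36 the schema binds whether the final step is a RunnableBinding
// or the chat model itself.
const runnable = await pull("my-handle/structured-prompt", { includeModel: true });
const result = await runnable.invoke({ question: "What is 2 + 2?" });
```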
package/dist/hub/node.cjs CHANGED
@@ -97,6 +97,9 @@ async function pull(ownerRepoCommit, options) {
     else if (modelName === "ChatBedrockConverse") {
         modelClass = (await Promise.resolve().then(() => __importStar(require("@langchain/aws")))).ChatBedrockConverse;
     }
+    else if (modelName === "ChatMistralAI") {
+        modelClass = (await Promise.resolve().then(() => __importStar(require("@langchain/mistralai")))).ChatMistralAI;
+    }
     else if (modelName === "ChatMistral") {
         modelClass = (await Promise.resolve().then(() => __importStar(require("@langchain/mistralai")))).ChatMistralAI;
     }
package/dist/hub/node.js CHANGED
@@ -61,6 +61,9 @@ export async function pull(ownerRepoCommit, options) {
     else if (modelName === "ChatBedrockConverse") {
         modelClass = (await import("@langchain/aws")).ChatBedrockConverse;
     }
+    else if (modelName === "ChatMistralAI") {
+        modelClass = (await import("@langchain/mistralai")).ChatMistralAI;
+    }
     else if (modelName === "ChatMistral") {
         modelClass = (await import("@langchain/mistralai")).ChatMistralAI;
     }
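Both the import-map lookup in hub/base and the dynamic import in hub/node now recognize the serialized name "ChatMistralAI" (what current @langchain/mistralai emits) alongside the older "ChatMistral". A short sketch, assuming @langchain/mistralai is installed and MISTRAL_API_KEY is set; the handle is illustrative:

```ts
import { pull } from "langchain/hub/node";

// A prompt whose attached model serialized as "ChatMistralAI" now loads
// without a manual modelClass override.
const chain = await pull("my-handle/mistral-prompt", { includeModel: true });
await chain.invoke({ topic: "bears" });
```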
@@ -35,7 +35,7 @@ var __importStar = (this && this.__importStar) || (function () {
 })();
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.stores__doc__base = exports.retrievers__matryoshka_retriever = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__ensemble = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.output_parsers = exports.callbacks = exports.document_transformers__openai_functions = exports.document_loaders__base = exports.memory__chat_memory = exports.memory = exports.text_splitter = exports.vectorstores__memory = exports.embeddings__fake = exports.embeddings__cache_backed = exports.chains__retrieval = exports.chains__openai_functions = exports.chains__history_aware_retriever = exports.chains__combine_documents__reduce = exports.chains__combine_documents = exports.chains = exports.tools__retriever = exports.tools__render = exports.tools__chain = exports.tools = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad__openai_tools = exports.agents__format_scratchpad = exports.agents__toolkits = exports.agents = void 0;
-exports.schema__output = exports.schema__output_parser = exports.schema__runnable = exports.prompts__base = exports.prompts__pipeline = exports.prompts__image = exports.prompts__chat = exports.schema = exports.schema__messages = exports.prompts__prompt = exports.embeddings__azure_openai = exports.embeddings__openai = exports.llms__azure_openai = exports.llms__openai = exports.chat_models__azure_openai = exports.chat_models__openai = exports.schema__prompt_template = exports.schema__query_constructor = exports.indexes = exports.runnables__remote = exports.smith = exports.evaluation = exports.experimental__prompts__custom_format = exports.experimental__masking = exports.experimental__chains__violation_of_expectations = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_files = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = void 0;
+exports.langsmith = exports.schema__output = exports.schema__output_parser = exports.schema__runnable = exports.prompts__base = exports.prompts__pipeline = exports.prompts__image = exports.prompts__chat = exports.schema = exports.schema__messages = exports.prompts__prompt = exports.embeddings__azure_openai = exports.embeddings__openai = exports.llms__azure_openai = exports.llms__openai = exports.chat_models__azure_openai = exports.chat_models__openai = exports.schema__prompt_template = exports.schema__query_constructor = exports.indexes = exports.runnables__remote = exports.smith = exports.evaluation = exports.experimental__prompts__custom_format = exports.experimental__masking = exports.experimental__chains__violation_of_expectations = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_files = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = void 0;
 exports.agents = __importStar(require("../agents/index.cjs"));
 exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
 exports.agents__format_scratchpad = __importStar(require("../agents/format_scratchpad/openai_functions.cjs"));
@@ -109,6 +109,7 @@ const prompt_values_1 = require("@langchain/core/prompt_values");
 const runnables_1 = require("@langchain/core/runnables");
 const output_parsers_1 = require("@langchain/core/output_parsers");
 const outputs_1 = require("@langchain/core/outputs");
+const langsmith_1 = require("langsmith");
 const chat_models__openai = {
     ChatOpenAI: openai_1.ChatOpenAI
 };
@@ -217,3 +218,7 @@ const schema__output = {
     GenerationChunk: outputs_1.GenerationChunk
 };
 exports.schema__output = schema__output;
+const langsmith = {
+    Client: langsmith_1.Client
+};
+exports.langsmith = langsmith;
@@ -71,6 +71,7 @@ import { StringPromptValue } from "@langchain/core/prompt_values";
 import { RouterRunnable, RunnableAssign, RunnableBinding, RunnableBranch, RunnableEach, RunnableMap, RunnableParallel, RunnablePassthrough, RunnablePick, RunnableRetry, RunnableSequence, RunnableWithFallbacks, RunnableWithMessageHistory } from "@langchain/core/runnables";
 import { StringOutputParser } from "@langchain/core/output_parsers";
 import { ChatGenerationChunk, GenerationChunk } from "@langchain/core/outputs";
+import { Client } from "langsmith";
 declare const chat_models__openai: {
     ChatOpenAI: typeof ChatOpenAI;
 };
@@ -179,3 +180,7 @@ declare const schema__output: {
     GenerationChunk: typeof GenerationChunk;
 };
 export { schema__output };
+declare const langsmith: {
+    Client: typeof Client;
+};
+export { langsmith };
@@ -72,6 +72,7 @@ import { StringPromptValue } from "@langchain/core/prompt_values";
 import { RouterRunnable, RunnableAssign, RunnableBinding, RunnableBranch, RunnableEach, RunnableMap, RunnableParallel, RunnablePassthrough, RunnablePick, RunnableRetry, RunnableSequence, RunnableWithFallbacks, RunnableWithMessageHistory } from "@langchain/core/runnables";
 import { StringOutputParser } from "@langchain/core/output_parsers";
 import { ChatGenerationChunk, GenerationChunk } from "@langchain/core/outputs";
+import { Client } from "langsmith";
 const chat_models__openai = {
     ChatOpenAI
 };
@@ -180,3 +181,7 @@ const schema__output = {
     GenerationChunk
 };
 export { schema__output };
+const langsmith = {
+    Client
+};
+export { langsmith };
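The hub import map used when reviving serialized artifacts now exposes the LangSmith Client under the langsmith key, across the CJS build, the type declarations, and the ESM build. For reference, a sketch of the class it maps to, assuming LANGSMITH_API_KEY is set in the environment:

```ts
import { Client } from "langsmith";

// The same class the new import-map entry resolves to.
const client = new Client();
```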
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "langchain",
-  "version": "0.3.34",
+  "version": "0.3.36",
   "description": "Typescript bindings for langchain",
   "type": "module",
   "engines": {