langchain 0.3.14 → 0.3.16

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
@@ -12,7 +12,6 @@ const _SUPPORTED_PROVIDERS = [
      "google-vertexai",
      "google-vertexai-web",
      "google-genai",
-     "google-genai",
      "ollama",
      "together",
      "fireworks",
@@ -385,6 +384,7 @@ class _ConfigurableModel extends chat_models_1.BaseChatModel {
   * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.
   *
   * @param {string | ChatModelProvider} [model] - The name of the model, e.g. "gpt-4", "claude-3-opus-20240229".
+  * Can be prefixed with the model provider, e.g. "openai:gpt-4", "anthropic:claude-3-opus-20240229".
   * @param {Object} [fields] - Additional configuration options.
   * @param {string} [fields.modelProvider] - The model provider. Supported values include:
   * - openai (@langchain/openai)
@@ -416,14 +416,12 @@ class _ConfigurableModel extends chat_models_1.BaseChatModel {
   * ```typescript
   * import { initChatModel } from "langchain/chat_models/universal";
   *
-  * const gpt4 = await initChatModel("gpt-4", {
-  *   modelProvider: "openai",
+  * const gpt4 = await initChatModel("openai:gpt-4", {
   *   temperature: 0.25,
   * });
   * const gpt4Result = await gpt4.invoke("what's your name");
   *
-  * const claude = await initChatModel("claude-3-opus-20240229", {
-  *   modelProvider: "anthropic",
+  * const claude = await initChatModel("anthropic:claude-3-opus-20240229", {
   *   temperature: 0.25,
   * });
   * const claudeResult = await claude.invoke("what's your name");
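The updated doc comment above documents the new shorthand: the provider can now be folded into the model string. A minimal sketch of the two call styles side by side (model names are illustrative; an OPENAI_API_KEY is assumed to be set):

```typescript
import { initChatModel } from "langchain/chat_models/universal";

// Pre-0.3.16 style: provider passed as a separate field (still supported).
const viaField = await initChatModel("gpt-4", {
  modelProvider: "openai",
  temperature: 0.25,
});

// New style: provider folded into the model string as "provider:model".
const viaPrefix = await initChatModel("openai:gpt-4", {
  temperature: 0.25,
});

console.log(await viaPrefix.invoke("what's your name"));
```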
@@ -585,10 +583,18 @@ class _ConfigurableModel extends chat_models_1.BaseChatModel {
  async function initChatModel(model,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  fields) {
-     const { configurableFields, configPrefix, modelProvider, ...params } = {
+     // eslint-disable-next-line prefer-const
+     let { configurableFields, configPrefix, modelProvider, ...params } = {
          configPrefix: "",
          ...(fields ?? {}),
      };
+     if (modelProvider === undefined && model?.includes(":")) {
+         const modelComponents = model.split(":", 1);
+         if (_SUPPORTED_PROVIDERS.includes(modelComponents[0])) {
+             // eslint-disable-next-line no-param-reassign
+             [modelProvider, model] = modelComponents;
+         }
+     }
      let configurableFieldsCopy = Array.isArray(configurableFields)
          ? [...configurableFields]
          : configurableFields;
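One thing to watch in the new prefix-parsing block: in JavaScript, the second argument to String.prototype.split is a limit on the number of elements returned, not a maximum split count as in Python. Run in isolation, the expression in the hunk above keeps only the provider half of the string:

```typescript
// split's second argument caps how many elements are RETURNED,
// so the model half of the string is discarded entirely:
const modelComponents = "openai:gpt-4".split(":", 1); // ["openai"]

// The destructuring assignment in the new block therefore yields:
const [modelProvider, model] = modelComponents;
console.log(modelProvider); // "openai"
console.log(model);         // undefined
```

(Separately, the `console.log("paramsCopy", paramsCopy)` added in the next hunk reads like leftover debug output that shipped with the release.)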
@@ -619,6 +625,7 @@ fields) {
      if (modelProvider) {
          paramsCopy.modelProvider = modelProvider;
      }
+     console.log("paramsCopy", paramsCopy);
      return new _ConfigurableModel({
          defaultConfig: paramsCopy,
          configPrefix,
@@ -9,7 +9,7 @@ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
  import { ChatResult } from "@langchain/core/outputs";
  interface EventStreamCallbackHandlerInput extends Omit<LogStreamCallbackHandlerInput, "_schemaFormat"> {
  }
- declare const _SUPPORTED_PROVIDERS: readonly ["openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-vertexai-web", "google-genai", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock", "cerebras", "deepseek"];
+ declare const _SUPPORTED_PROVIDERS: readonly ["openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-vertexai-web", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock", "cerebras", "deepseek"];
  export type ChatModelProvider = (typeof _SUPPORTED_PROVIDERS)[number];
  export interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {
      tools?: (StructuredToolInterface | Record<string, unknown> | ToolDefinition | RunnableToolLike)[];
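Side note on this declaration change: because `ChatModelProvider` is derived by indexing the tuple type with `number`, duplicate string literals collapse in the resulting union, so removing the second "google-genai" changes the runtime array but leaves the public type unchanged. A reduced sketch of the pattern:

```typescript
// Reduced illustration of the declaration above.
const PROVIDERS = ["openai", "google-genai", "google-genai"] as const;
type Provider = (typeof PROVIDERS)[number]; // "openai" | "google-genai"
// The union is identical with or without the duplicate entry.
```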
@@ -9,7 +9,6 @@ const _SUPPORTED_PROVIDERS = [
      "google-vertexai",
      "google-vertexai-web",
      "google-genai",
-     "google-genai",
      "ollama",
      "together",
      "fireworks",
@@ -381,6 +380,7 @@ class _ConfigurableModel extends BaseChatModel {
   * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.
   *
   * @param {string | ChatModelProvider} [model] - The name of the model, e.g. "gpt-4", "claude-3-opus-20240229".
+  * Can be prefixed with the model provider, e.g. "openai:gpt-4", "anthropic:claude-3-opus-20240229".
   * @param {Object} [fields] - Additional configuration options.
   * @param {string} [fields.modelProvider] - The model provider. Supported values include:
   * - openai (@langchain/openai)
@@ -412,14 +412,12 @@ class _ConfigurableModel extends BaseChatModel {
   * ```typescript
   * import { initChatModel } from "langchain/chat_models/universal";
   *
-  * const gpt4 = await initChatModel("gpt-4", {
-  *   modelProvider: "openai",
+  * const gpt4 = await initChatModel("openai:gpt-4", {
   *   temperature: 0.25,
   * });
   * const gpt4Result = await gpt4.invoke("what's your name");
   *
-  * const claude = await initChatModel("claude-3-opus-20240229", {
-  *   modelProvider: "anthropic",
+  * const claude = await initChatModel("anthropic:claude-3-opus-20240229", {
   *   temperature: 0.25,
   * });
   * const claudeResult = await claude.invoke("what's your name");
@@ -581,10 +579,18 @@ class _ConfigurableModel extends BaseChatModel {
  export async function initChatModel(model,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  fields) {
-     const { configurableFields, configPrefix, modelProvider, ...params } = {
+     // eslint-disable-next-line prefer-const
+     let { configurableFields, configPrefix, modelProvider, ...params } = {
          configPrefix: "",
          ...(fields ?? {}),
      };
+     if (modelProvider === undefined && model?.includes(":")) {
+         const modelComponents = model.split(":", 1);
+         if (_SUPPORTED_PROVIDERS.includes(modelComponents[0])) {
+             // eslint-disable-next-line no-param-reassign
+             [modelProvider, model] = modelComponents;
+         }
+     }
      let configurableFieldsCopy = Array.isArray(configurableFields)
          ? [...configurableFields]
          : configurableFields;
@@ -615,6 +621,7 @@ fields) {
      if (modelProvider) {
          paramsCopy.modelProvider = modelProvider;
      }
+     console.log("paramsCopy", paramsCopy);
      return new _ConfigurableModel({
          defaultConfig: paramsCopy,
          configPrefix,
@@ -82,7 +82,7 @@ class OpenAIAssistantRunnable extends runnables_1.Runnable {
      {
          role: "user",
          content: input.content,
-         file_ids: input.fileIds,
+         attachments: input.attachments,
          metadata: input.messagesMetadata,
      },
  ],
@@ -97,7 +97,7 @@ class OpenAIAssistantRunnable extends runnables_1.Runnable {
  await this.client.beta.threads.messages.create(input.threadId, {
      content: input.content,
      role: "user",
-     file_ids: input.file_ids,
+     attachments: input.attachments,
      metadata: input.messagesMetadata,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
  });
@@ -79,7 +79,7 @@ export class OpenAIAssistantRunnable extends Runnable {
      {
          role: "user",
          content: input.content,
-         file_ids: input.fileIds,
+         attachments: input.attachments,
          metadata: input.messagesMetadata,
      },
  ],
@@ -94,7 +94,7 @@ export class OpenAIAssistantRunnable extends Runnable {
  await this.client.beta.threads.messages.create(input.threadId, {
      content: input.content,
      role: "user",
-     file_ids: input.file_ids,
+     attachments: input.attachments,
      metadata: input.messagesMetadata,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
  });
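These four hunks (the CJS and ESM builds of the same file) track OpenAI's Assistants v2 API, which replaced the per-message `file_ids` array with `attachments` that pair a file with the tools allowed to read it. A hedged sketch of the new input shape, assuming `assistant` is an existing `OpenAIAssistantRunnable` and the file id is illustrative:

```typescript
// Before (v1-style input, no longer forwarded to the API):
// await assistant.invoke({ content: "Summarize this file", fileIds: ["file-abc123"] });

// After (v2-style input): each attachment names a file and the tools
// that may access it.
await assistant.invoke({
  content: "Summarize this file",
  attachments: [{ file_id: "file-abc123", tools: [{ type: "file_search" }] }],
});
```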
package/dist/hub/base.cjs CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.generateModelImportMap = exports.basePull = exports.basePush = void 0;
+ exports.generateOptionalImportMap = exports.generateModelImportMap = exports.basePull = exports.basePush = void 0;
  const langsmith_1 = require("langsmith");
  /**
   * Push a prompt to the hub.
@@ -70,10 +70,6 @@ function generateModelImportMap(
  modelClass) {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const modelImportMap = {};
-     // TODO: Fix in 0.4.0. We can't get lc_id without instantiating the class, so we
-     // must put them inline here. In the future, make this less hacky
-     // This should probably use dynamic imports and have a web-only entrypoint
-     // in a future breaking release
      if (modelClass !== undefined) {
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          const modelLcName = modelClass?.lc_name();
@@ -113,3 +109,30 @@ modelClass) {
      return modelImportMap;
  }
  exports.generateModelImportMap = generateModelImportMap;
+ function generateOptionalImportMap(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ modelClass) {
+     // eslint-disable-next-line @typescript-eslint/no-explicit-any
+     const optionalImportMap = {};
+     if (modelClass !== undefined) {
+         // eslint-disable-next-line @typescript-eslint/no-explicit-any
+         const modelLcName = modelClass?.lc_name();
+         let optionalImportMapKey;
+         if (modelLcName === "ChatGoogleGenerativeAI") {
+             optionalImportMapKey = "langchain_google_genai/chat_models";
+         }
+         else if (modelLcName === "ChatBedrockConverse") {
+             optionalImportMapKey = "langchain_aws/chat_models";
+         }
+         else if (modelLcName === "ChatGroq") {
+             optionalImportMapKey = "langchain_groq/chat_models";
+         }
+         if (optionalImportMapKey !== undefined) {
+             optionalImportMap[optionalImportMapKey] = {
+                 [modelLcName]: modelClass,
+             };
+         }
+     }
+     return optionalImportMap;
+ }
+ exports.generateOptionalImportMap = generateOptionalImportMap;
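The new helper mirrors generateModelImportMap but keys the entry by the serialized module path used in prompt manifests, covering three known model classes. Reading straight off the code above, a call would behave like this (hypothetical direct use, shown only to illustrate the return shape; in practice pull() invokes it internally):

```typescript
import { ChatGroq } from "@langchain/groq";

// generateOptionalImportMap is internal to the hub entrypoints; declared
// here purely so the sketch type-checks on its own.
declare function generateOptionalImportMap(
  modelClass?: new (...args: any[]) => unknown
): Record<string, any>;

const map = generateOptionalImportMap(ChatGroq);
// => { "langchain_groq/chat_models": { ChatGroq } }
// Any class outside the three named branches falls through and yields {}.
```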
@@ -27,3 +27,4 @@ export declare function basePull(ownerRepoCommit: string, options?: {
      includeModel?: boolean;
  }): Promise<import("langsmith/schemas").PromptCommit>;
  export declare function generateModelImportMap(modelClass?: new (...args: any[]) => BaseLanguageModel): Record<string, any>;
+ export declare function generateOptionalImportMap(modelClass?: new (...args: any[]) => BaseLanguageModel): Record<string, any>;
package/dist/hub/base.js CHANGED
@@ -65,10 +65,6 @@ export function generateModelImportMap(
  modelClass) {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const modelImportMap = {};
-     // TODO: Fix in 0.4.0. We can't get lc_id without instantiating the class, so we
-     // must put them inline here. In the future, make this less hacky
-     // This should probably use dynamic imports and have a web-only entrypoint
-     // in a future breaking release
      if (modelClass !== undefined) {
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          const modelLcName = modelClass?.lc_name();
@@ -107,3 +103,29 @@ modelClass) {
      }
      return modelImportMap;
  }
+ export function generateOptionalImportMap(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ modelClass) {
+     // eslint-disable-next-line @typescript-eslint/no-explicit-any
+     const optionalImportMap = {};
+     if (modelClass !== undefined) {
+         // eslint-disable-next-line @typescript-eslint/no-explicit-any
+         const modelLcName = modelClass?.lc_name();
+         let optionalImportMapKey;
+         if (modelLcName === "ChatGoogleGenerativeAI") {
+             optionalImportMapKey = "langchain_google_genai/chat_models";
+         }
+         else if (modelLcName === "ChatBedrockConverse") {
+             optionalImportMapKey = "langchain_aws/chat_models";
+         }
+         else if (modelLcName === "ChatGroq") {
+             optionalImportMapKey = "langchain_groq/chat_models";
+         }
+         if (optionalImportMapKey !== undefined) {
+             optionalImportMap[optionalImportMapKey] = {
+                 [modelLcName]: modelClass,
+             };
+         }
+     }
+     return optionalImportMap;
+ }
@@ -23,7 +23,7 @@ Object.defineProperty(exports, "push", { enumerable: true, get: function () { re
  async function pull(ownerRepoCommit, options) {
      const promptObject = await (0, base_js_1.basePull)(ownerRepoCommit, options);
      try {
-         const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, undefined, (0, base_js_1.generateModelImportMap)(options?.modelClass));
+         const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, (0, base_js_1.generateOptionalImportMap)(options?.modelClass), (0, base_js_1.generateModelImportMap)(options?.modelClass));
          return loadedPrompt;
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
      }
package/dist/hub/index.js CHANGED
@@ -1,5 +1,5 @@
  import { load } from "../load/index.js";
- import { basePush, basePull, generateModelImportMap } from "./base.js";
+ import { basePush, basePull, generateModelImportMap, generateOptionalImportMap, } from "./base.js";
  export { basePush as push };
  /**
   * Pull a prompt from the hub.
@@ -20,7 +20,7 @@ export { basePush as push };
  export async function pull(ownerRepoCommit, options) {
      const promptObject = await basePull(ownerRepoCommit, options);
      try {
-         const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, undefined, generateModelImportMap(options?.modelClass));
+         const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, generateOptionalImportMap(options?.modelClass), generateModelImportMap(options?.modelClass));
          return loadedPrompt;
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
      }
package/dist/hub/node.cjs CHANGED
@@ -47,7 +47,7 @@ async function pull(ownerRepoCommit, options) {
              }
          }
      }
-     const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, undefined, (0, base_js_1.generateModelImportMap)(modelClass));
+     const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, (0, base_js_1.generateOptionalImportMap)(modelClass), (0, base_js_1.generateModelImportMap)(modelClass));
      return loadedPrompt;
  }
  exports.pull = pull;
package/dist/hub/node.js CHANGED
@@ -1,4 +1,4 @@
- import { basePush, basePull, generateModelImportMap } from "./base.js";
+ import { basePush, basePull, generateModelImportMap, generateOptionalImportMap, } from "./base.js";
  import { load } from "../load/index.js";
  // TODO: Make this the default, add web entrypoint in next breaking release
  export { basePush as push };
@@ -45,6 +45,6 @@ export async function pull(ownerRepoCommit, options) {
              }
          }
      }
-     const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, undefined, generateModelImportMap(modelClass));
+     const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, generateOptionalImportMap(modelClass), generateModelImportMap(modelClass));
      return loadedPrompt;
  }
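Taken together, the hub changes pass the new optional-import map as load()'s third argument (previously undefined) alongside the existing model import map, so prompts saved with one of the recognized attached model classes can deserialize on pull. A hedged end-to-end sketch (prompt name illustrative; option names follow the includeModel flag in the .d.ts hunk above):

```typescript
import * as hub from "langchain/hub/node";

// The node entrypoint infers the model class from the prompt manifest and
// now supplies both import maps to load(), so a prompt saved together with
// e.g. a ChatGroq model deserializes without manual wiring.
const runnable = await hub.pull("my-handle/my-prompt", { includeModel: true });
const result = await runnable.invoke({ question: "what's your name" });
console.log(result);
```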
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "langchain",
-   "version": "0.3.14",
+   "version": "0.3.16",
    "description": "Typescript bindings for langchain",
    "type": "module",
    "engines": {