langchain 0.3.15 → 0.3.17

This diff represents the changes between publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -12,7 +12,6 @@ const _SUPPORTED_PROVIDERS = [
     "google-vertexai",
     "google-vertexai-web",
     "google-genai",
-    "google-genai",
     "ollama",
     "together",
     "fireworks",
@@ -385,6 +384,7 @@ class _ConfigurableModel extends chat_models_1.BaseChatModel {
  * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.
  *
  * @param {string | ChatModelProvider} [model] - The name of the model, e.g. "gpt-4", "claude-3-opus-20240229".
+ * Can be prefixed with the model provider, e.g. "openai:gpt-4", "anthropic:claude-3-opus-20240229".
  * @param {Object} [fields] - Additional configuration options.
  * @param {string} [fields.modelProvider] - The model provider. Supported values include:
  * - openai (@langchain/openai)
@@ -416,14 +416,12 @@ class _ConfigurableModel extends chat_models_1.BaseChatModel {
  * ```typescript
  * import { initChatModel } from "langchain/chat_models/universal";
  *
- * const gpt4 = await initChatModel("gpt-4", {
- *   modelProvider: "openai",
+ * const gpt4 = await initChatModel("openai:gpt-4", {
  *   temperature: 0.25,
  * });
  * const gpt4Result = await gpt4.invoke("what's your name");
  *
- * const claude = await initChatModel("claude-3-opus-20240229", {
- *   modelProvider: "anthropic",
+ * const claude = await initChatModel("anthropic:claude-3-opus-20240229", {
  *   temperature: 0.25,
  * });
  * const claudeResult = await claude.invoke("what's your name");
@@ -585,10 +583,18 @@ class _ConfigurableModel extends chat_models_1.BaseChatModel {
 async function initChatModel(model,
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 fields) {
-    const { configurableFields, configPrefix, modelProvider, ...params } = {
+    // eslint-disable-next-line prefer-const
+    let { configurableFields, configPrefix, modelProvider, ...params } = {
         configPrefix: "",
         ...(fields ?? {}),
     };
+    if (modelProvider === undefined && model?.includes(":")) {
+        const modelComponents = model.split(":", 1);
+        if (_SUPPORTED_PROVIDERS.includes(modelComponents[0])) {
+            // eslint-disable-next-line no-param-reassign
+            [modelProvider, model] = modelComponents;
+        }
+    }
     let configurableFieldsCopy = Array.isArray(configurableFields)
         ? [...configurableFields]
         : configurableFields;
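
The block added above infers `modelProvider` from a `provider:model` prefix when the caller does not supply one. As a standalone illustration of the technique (hypothetical names, not the package's code), here is a sketch that splits on the first colon and keeps everything after it as the model name:

```typescript
// Illustrative sketch of provider-prefix parsing; not the package's API.
const PROVIDERS = ["openai", "anthropic", "google-genai"] as const;

function splitProviderPrefix(model: string): { provider?: string; model: string } {
  const idx = model.indexOf(":");
  if (idx === -1) return { model };
  const prefix = model.slice(0, idx);
  // Only treat the prefix as a provider when it is a known one, so model
  // names that merely contain ":" pass through untouched.
  if ((PROVIDERS as readonly string[]).includes(prefix)) {
    return { provider: prefix, model: model.slice(idx + 1) };
  }
  return { model };
}

console.log(splitProviderPrefix("openai:gpt-4"));
// -> { provider: "openai", model: "gpt-4" }
```
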
@@ -9,7 +9,7 @@ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 import { ChatResult } from "@langchain/core/outputs";
 interface EventStreamCallbackHandlerInput extends Omit<LogStreamCallbackHandlerInput, "_schemaFormat"> {
 }
-declare const _SUPPORTED_PROVIDERS: readonly ["openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-vertexai-web", "google-genai", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock", "cerebras", "deepseek"];
+declare const _SUPPORTED_PROVIDERS: readonly ["openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-vertexai-web", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock", "cerebras", "deepseek"];
 export type ChatModelProvider = (typeof _SUPPORTED_PROVIDERS)[number];
 export interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {
     tools?: (StructuredToolInterface | Record<string, unknown> | ToolDefinition | RunnableToolLike)[];
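
A side note on the declaration above: because `ChatModelProvider` is derived as `(typeof _SUPPORTED_PROVIDERS)[number]`, the duplicated `"google-genai"` entry never widened the union type, so removing it is a cleanup rather than a behavior change. A minimal standalone illustration:

```typescript
// Duplicates collapse when a const tuple is indexed into a union.
const PROVIDERS = ["openai", "anthropic", "anthropic"] as const;
type Provider = (typeof PROVIDERS)[number]; // "openai" | "anthropic"

const ok: Provider = "anthropic"; // compiles
// const bad: Provider = "mistralai"; // would be a type error
```
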
@@ -9,7 +9,6 @@ const _SUPPORTED_PROVIDERS = [
     "google-vertexai",
     "google-vertexai-web",
     "google-genai",
-    "google-genai",
     "ollama",
     "together",
     "fireworks",
@@ -381,6 +380,7 @@ class _ConfigurableModel extends BaseChatModel {
  * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.
  *
  * @param {string | ChatModelProvider} [model] - The name of the model, e.g. "gpt-4", "claude-3-opus-20240229".
+ * Can be prefixed with the model provider, e.g. "openai:gpt-4", "anthropic:claude-3-opus-20240229".
  * @param {Object} [fields] - Additional configuration options.
  * @param {string} [fields.modelProvider] - The model provider. Supported values include:
  * - openai (@langchain/openai)
@@ -412,14 +412,12 @@ class _ConfigurableModel extends BaseChatModel {
  * ```typescript
  * import { initChatModel } from "langchain/chat_models/universal";
  *
- * const gpt4 = await initChatModel("gpt-4", {
- *   modelProvider: "openai",
+ * const gpt4 = await initChatModel("openai:gpt-4", {
  *   temperature: 0.25,
  * });
  * const gpt4Result = await gpt4.invoke("what's your name");
  *
- * const claude = await initChatModel("claude-3-opus-20240229", {
- *   modelProvider: "anthropic",
+ * const claude = await initChatModel("anthropic:claude-3-opus-20240229", {
  *   temperature: 0.25,
  * });
  * const claudeResult = await claude.invoke("what's your name");
@@ -581,10 +579,18 @@ class _ConfigurableModel extends BaseChatModel {
 export async function initChatModel(model,
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 fields) {
-    const { configurableFields, configPrefix, modelProvider, ...params } = {
+    // eslint-disable-next-line prefer-const
+    let { configurableFields, configPrefix, modelProvider, ...params } = {
         configPrefix: "",
         ...(fields ?? {}),
     };
+    if (modelProvider === undefined && model?.includes(":")) {
+        const modelComponents = model.split(":", 1);
+        if (_SUPPORTED_PROVIDERS.includes(modelComponents[0])) {
+            // eslint-disable-next-line no-param-reassign
+            [modelProvider, model] = modelComponents;
+        }
+    }
     let configurableFieldsCopy = Array.isArray(configurableFields)
         ? [...configurableFields]
         : configurableFields;
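
Taken together, the universal-model hunks let callers fold the provider into the model string instead of passing `fields.modelProvider`. A minimal usage sketch, assuming `@langchain/openai` is installed and `OPENAI_API_KEY` is set in the environment:

```typescript
import { initChatModel } from "langchain/chat_models/universal";

// The "openai:" prefix selects the provider; no modelProvider field needed.
const gpt4 = await initChatModel("openai:gpt-4", { temperature: 0.25 });
const result = await gpt4.invoke("what's your name");
console.log(result.content);
```
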
@@ -82,7 +82,7 @@ class OpenAIAssistantRunnable extends runnables_1.Runnable {
             {
                 role: "user",
                 content: input.content,
-                file_ids: input.fileIds,
+                attachments: input.attachments,
                 metadata: input.messagesMetadata,
             },
         ],
@@ -97,7 +97,7 @@ class OpenAIAssistantRunnable extends runnables_1.Runnable {
         await this.client.beta.threads.messages.create(input.threadId, {
             content: input.content,
             role: "user",
-            file_ids: input.file_ids,
+            attachments: input.attachments,
             metadata: input.messagesMetadata,
             // eslint-disable-next-line @typescript-eslint/no-explicit-any
         });
@@ -79,7 +79,7 @@ export class OpenAIAssistantRunnable extends Runnable {
             {
                 role: "user",
                 content: input.content,
-                file_ids: input.fileIds,
+                attachments: input.attachments,
                 metadata: input.messagesMetadata,
             },
         ],
@@ -94,7 +94,7 @@ export class OpenAIAssistantRunnable extends Runnable {
         await this.client.beta.threads.messages.create(input.threadId, {
             content: input.content,
             role: "user",
-            file_ids: input.file_ids,
+            attachments: input.attachments,
            metadata: input.messagesMetadata,
             // eslint-disable-next-line @typescript-eslint/no-explicit-any
         });
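
The `file_ids` → `attachments` rename in the assistant hunks tracks version 2 of OpenAI's Assistants API, where each attached file is paired with the tools allowed to read it. A hedged sketch of the input the runnable now forwards (the IDs are placeholders, and the constructor fields are assumptions based on this module, not confirmed by the diff):

```typescript
import { OpenAIAssistantRunnable } from "langchain/experimental/openai_assistant";

// Placeholder assistant ID; assumes an assistant already exists.
const assistant = new OpenAIAssistantRunnable({ assistantId: "asst_abc123" });

const response = await assistant.invoke({
  content: "Summarize the attached report.",
  // Assistants API v2 shape: each attachment names an uploaded file and
  // the tools permitted to read it, replacing the flat file_ids list.
  attachments: [{ file_id: "file-abc123", tools: [{ type: "file_search" }] }],
});
```
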
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "langchain",
-  "version": "0.3.15",
+  "version": "0.3.17",
   "description": "Typescript bindings for langchain",
   "type": "module",
   "engines": {