langchain 0.0.143 → 0.0.145

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/callbacks/handlers/llmonitor.cjs +1 -0
  2. package/callbacks/handlers/llmonitor.d.ts +1 -0
  3. package/callbacks/handlers/llmonitor.js +1 -0
  4. package/dist/agents/mrkl/outputParser.cjs +1 -1
  5. package/dist/agents/mrkl/outputParser.js +1 -1
  6. package/dist/base_language/index.cjs +2 -1
  7. package/dist/base_language/index.d.ts +7 -2
  8. package/dist/base_language/index.js +2 -1
  9. package/dist/callbacks/handlers/llmonitor.cjs +223 -0
  10. package/dist/callbacks/handlers/llmonitor.d.ts +35 -0
  11. package/dist/callbacks/handlers/llmonitor.js +215 -0
  12. package/dist/chains/openai_functions/extraction.d.ts +4 -4
  13. package/dist/chains/openai_functions/openapi.d.ts +3 -3
  14. package/dist/chains/openai_functions/structured_output.d.ts +5 -4
  15. package/dist/chains/openai_functions/tagging.d.ts +4 -4
  16. package/dist/chains/openai_moderation.cjs +1 -0
  17. package/dist/chains/openai_moderation.js +1 -0
  18. package/dist/chat_models/base.cjs +4 -3
  19. package/dist/chat_models/base.d.ts +3 -3
  20. package/dist/chat_models/base.js +5 -4
  21. package/dist/chat_models/minimax.d.ts +6 -28
  22. package/dist/chat_models/openai.d.ts +2 -3
  23. package/dist/document_loaders/fs/openai_whisper_audio.cjs +32 -0
  24. package/dist/document_loaders/fs/openai_whisper_audio.d.ts +11 -0
  25. package/dist/document_loaders/fs/openai_whisper_audio.js +28 -0
  26. package/dist/document_loaders/web/github.cjs +210 -24
  27. package/dist/document_loaders/web/github.d.ts +44 -1
  28. package/dist/document_loaders/web/github.js +210 -24
  29. package/dist/document_loaders/web/recursive_url.cjs +13 -0
  30. package/dist/document_loaders/web/recursive_url.js +13 -0
  31. package/dist/embeddings/hf_transformers.cjs +71 -0
  32. package/dist/embeddings/hf_transformers.d.ts +29 -0
  33. package/dist/embeddings/hf_transformers.js +67 -0
  34. package/dist/embeddings/ollama.cjs +114 -0
  35. package/dist/embeddings/ollama.d.ts +34 -0
  36. package/dist/embeddings/ollama.js +110 -0
  37. package/dist/experimental/chat_models/anthropic_functions.d.ts +2 -5
  38. package/dist/load/import_constants.cjs +3 -0
  39. package/dist/load/import_constants.js +3 -0
  40. package/dist/load/import_map.cjs +3 -2
  41. package/dist/load/import_map.d.ts +1 -0
  42. package/dist/load/import_map.js +1 -0
  43. package/dist/prompts/chat.cjs +27 -1
  44. package/dist/prompts/chat.d.ts +3 -2
  45. package/dist/prompts/chat.js +28 -2
  46. package/dist/schema/index.cjs +44 -1
  47. package/dist/schema/index.d.ts +10 -0
  48. package/dist/schema/index.js +41 -0
  49. package/dist/tools/serpapi.cjs +108 -13
  50. package/dist/tools/serpapi.js +108 -13
  51. package/dist/vectorstores/redis.cjs +12 -4
  52. package/dist/vectorstores/redis.d.ts +8 -0
  53. package/dist/vectorstores/redis.js +12 -4
  54. package/dist/vectorstores/tigris.cjs +2 -0
  55. package/dist/vectorstores/tigris.d.ts +2 -3
  56. package/dist/vectorstores/tigris.js +2 -0
  57. package/dist/vectorstores/vectara.cjs +30 -12
  58. package/dist/vectorstores/vectara.d.ts +1 -1
  59. package/dist/vectorstores/vectara.js +30 -12
  60. package/document_loaders/fs/openai_whisper_audio.cjs +1 -0
  61. package/document_loaders/fs/openai_whisper_audio.d.ts +1 -0
  62. package/document_loaders/fs/openai_whisper_audio.js +1 -0
  63. package/embeddings/hf_transformers.cjs +1 -0
  64. package/embeddings/hf_transformers.d.ts +1 -0
  65. package/embeddings/hf_transformers.js +1 -0
  66. package/embeddings/ollama.cjs +1 -0
  67. package/embeddings/ollama.d.ts +1 -0
  68. package/embeddings/ollama.js +1 -0
  69. package/package.json +52 -14
package/dist/embeddings/ollama.cjs
@@ -0,0 +1,114 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OllamaEmbeddings = void 0;
+ const base_js_1 = require("./base.cjs");
+ class OllamaEmbeddings extends base_js_1.Embeddings {
+     constructor(params) {
+         super(params || {});
+         Object.defineProperty(this, "model", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "llama2"
+         });
+         Object.defineProperty(this, "baseUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "http://localhost:11434"
+         });
+         Object.defineProperty(this, "requestOptions", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         if (params?.model) {
+             this.model = params.model;
+         }
+         if (params?.baseUrl) {
+             this.baseUrl = params.baseUrl;
+         }
+         if (params?.requestOptions) {
+             this.requestOptions = this._convertOptions(params.requestOptions);
+         }
+     }
+     /** convert camelCased Ollama request options like "useMMap" to
+      * the snake_cased equivalent which the ollama API actually uses.
+      * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
+      */
+     _convertOptions(requestOptions) {
+         const snakeCasedOptions = {};
+         const mapping = {
+             embeddingOnly: "embedding_only",
+             f16KV: "f16_kv",
+             frequencyPenalty: "frequency_penalty",
+             logitsAll: "logits_all",
+             lowVram: "low_vram",
+             mainGpu: "main_gpu",
+             mirostat: "mirostat",
+             mirostatEta: "mirostat_eta",
+             mirostatTau: "mirostat_tau",
+             numBatch: "num_batch",
+             numCtx: "num_ctx",
+             numGpu: "num_gpu",
+             numGqa: "num_gqa",
+             numKeep: "num_keep",
+             numThread: "num_thread",
+             penalizeNewline: "penalize_newline",
+             presencePenalty: "presence_penalty",
+             repeatLastN: "repeat_last_n",
+             repeatPenalty: "repeat_penalty",
+             ropeFrequencyBase: "rope_frequency_base",
+             ropeFrequencyScale: "rope_frequency_scale",
+             temperature: "temperature",
+             stop: "stop",
+             tfsZ: "tfs_z",
+             topK: "top_k",
+             topP: "top_p",
+             typicalP: "typical_p",
+             useMLock: "use_mlock",
+             useMMap: "use_mmap",
+             vocabOnly: "vocab_only",
+         };
+         for (const [key, value] of Object.entries(requestOptions)) {
+             const snakeCasedOption = mapping[key];
+             if (snakeCasedOption) {
+                 snakeCasedOptions[snakeCasedOption] = value;
+             }
+         }
+         return snakeCasedOptions;
+     }
+     async _request(prompt) {
+         const { model, baseUrl, requestOptions } = this;
+         const response = await fetch(`${baseUrl}/api/embeddings`, {
+             method: "POST",
+             headers: { "Content-Type": "application/json" },
+             body: JSON.stringify({
+                 prompt,
+                 model,
+                 options: requestOptions,
+             }),
+         });
+         if (!response.ok) {
+             throw new Error(`Request to Ollama server failed: ${response.status} ${response.statusText}`);
+         }
+         const json = await response.json();
+         return json.embedding;
+     }
+     async _embed(strings) {
+         const embeddings = [];
+         for await (const prompt of strings) {
+             const embedding = await this.caller.call(() => this._request(prompt));
+             embeddings.push(embedding);
+         }
+         return embeddings;
+     }
+     async embedDocuments(documents) {
+         return this._embed(documents);
+     }
+     async embedQuery(document) {
+         return (await this.embedDocuments([document]))[0];
+     }
+ }
+ exports.OllamaEmbeddings = OllamaEmbeddings;
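
The `_convertOptions` helper above is a plain lookup-table rename: each camelCased key with a known snake_cased equivalent is copied across, and unrecognized keys are silently dropped rather than forwarded to the server. A minimal standalone sketch of the same behavior, with the table truncated to three entries for illustration:

```ts
// Sketch of the camelCase -> snake_case rename in OllamaEmbeddings._convertOptions.
// The shipped class maps ~30 option names; only three are shown here.
const mapping: Record<string, string> = {
  numCtx: "num_ctx",
  useMMap: "use_mmap",
  temperature: "temperature",
};

function convertOptions(requestOptions: Record<string, unknown>): Record<string, unknown> {
  const snakeCased: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(requestOptions)) {
    const renamed = mapping[key];
    if (renamed) {
      snakeCased[renamed] = value; // unknown keys are dropped, not passed through
    }
  }
  return snakeCased;
}

// convertOptions({ numCtx: 2048, useMMap: true, bogus: 1 })
// => { num_ctx: 2048, use_mmap: true }
```

Dropping unknown keys keeps typos from reaching the Ollama API, at the cost of making them fail silently on the client side.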
package/dist/embeddings/ollama.d.ts
@@ -0,0 +1,34 @@
+ import { OllamaInput, OllamaRequestParams } from "../util/ollama.js";
+ import { Embeddings, EmbeddingsParams } from "./base.js";
+ type CamelCasedRequestOptions = Omit<OllamaInput, "baseUrl" | "model">;
+ /**
+  * Interface for OllamaEmbeddings parameters. Extends EmbeddingsParams and
+  * defines additional parameters specific to the OllamaEmbeddings class.
+  */
+ interface OllamaEmbeddingsParams extends EmbeddingsParams {
+     /** The Ollama model to use, e.g: "llama2:13b" */
+     model?: string;
+     /** Base URL of the Ollama server, defaults to "http://localhost:11434" */
+     baseUrl?: string;
+     /** Advanced Ollama API request parameters in camelCase, see
+      * https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
+      * for details of the available parameters.
+      */
+     requestOptions?: CamelCasedRequestOptions;
+ }
+ export declare class OllamaEmbeddings extends Embeddings {
+     model: string;
+     baseUrl: string;
+     requestOptions?: OllamaRequestParams["options"];
+     constructor(params?: OllamaEmbeddingsParams);
+     /** convert camelCased Ollama request options like "useMMap" to
+      * the snake_cased equivalent which the ollama API actually uses.
+      * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
+      */
+     _convertOptions(requestOptions: CamelCasedRequestOptions): Record<string, unknown>;
+     _request(prompt: string): Promise<number[]>;
+     _embed(strings: string[]): Promise<number[][]>;
+     embedDocuments(documents: string[]): Promise<number[][]>;
+     embedQuery(document: string): Promise<number[]>;
+ }
+ export {};
package/dist/embeddings/ollama.js
@@ -0,0 +1,110 @@
+ import { Embeddings } from "./base.js";
+ export class OllamaEmbeddings extends Embeddings {
+     constructor(params) {
+         super(params || {});
+         Object.defineProperty(this, "model", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "llama2"
+         });
+         Object.defineProperty(this, "baseUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "http://localhost:11434"
+         });
+         Object.defineProperty(this, "requestOptions", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         if (params?.model) {
+             this.model = params.model;
+         }
+         if (params?.baseUrl) {
+             this.baseUrl = params.baseUrl;
+         }
+         if (params?.requestOptions) {
+             this.requestOptions = this._convertOptions(params.requestOptions);
+         }
+     }
+     /** convert camelCased Ollama request options like "useMMap" to
+      * the snake_cased equivalent which the ollama API actually uses.
+      * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
+      */
+     _convertOptions(requestOptions) {
+         const snakeCasedOptions = {};
+         const mapping = {
+             embeddingOnly: "embedding_only",
+             f16KV: "f16_kv",
+             frequencyPenalty: "frequency_penalty",
+             logitsAll: "logits_all",
+             lowVram: "low_vram",
+             mainGpu: "main_gpu",
+             mirostat: "mirostat",
+             mirostatEta: "mirostat_eta",
+             mirostatTau: "mirostat_tau",
+             numBatch: "num_batch",
+             numCtx: "num_ctx",
+             numGpu: "num_gpu",
+             numGqa: "num_gqa",
+             numKeep: "num_keep",
+             numThread: "num_thread",
+             penalizeNewline: "penalize_newline",
+             presencePenalty: "presence_penalty",
+             repeatLastN: "repeat_last_n",
+             repeatPenalty: "repeat_penalty",
+             ropeFrequencyBase: "rope_frequency_base",
+             ropeFrequencyScale: "rope_frequency_scale",
+             temperature: "temperature",
+             stop: "stop",
+             tfsZ: "tfs_z",
+             topK: "top_k",
+             topP: "top_p",
+             typicalP: "typical_p",
+             useMLock: "use_mlock",
+             useMMap: "use_mmap",
+             vocabOnly: "vocab_only",
+         };
+         for (const [key, value] of Object.entries(requestOptions)) {
+             const snakeCasedOption = mapping[key];
+             if (snakeCasedOption) {
+                 snakeCasedOptions[snakeCasedOption] = value;
+             }
+         }
+         return snakeCasedOptions;
+     }
+     async _request(prompt) {
+         const { model, baseUrl, requestOptions } = this;
+         const response = await fetch(`${baseUrl}/api/embeddings`, {
+             method: "POST",
+             headers: { "Content-Type": "application/json" },
+             body: JSON.stringify({
+                 prompt,
+                 model,
+                 options: requestOptions,
+             }),
+         });
+         if (!response.ok) {
+             throw new Error(`Request to Ollama server failed: ${response.status} ${response.statusText}`);
+         }
+         const json = await response.json();
+         return json.embedding;
+     }
+     async _embed(strings) {
+         const embeddings = [];
+         for await (const prompt of strings) {
+             const embedding = await this.caller.call(() => this._request(prompt));
+             embeddings.push(embedding);
+         }
+         return embeddings;
+     }
+     async embedDocuments(documents) {
+         return this._embed(documents);
+     }
+     async embedQuery(document) {
+         return (await this.embedDocuments([document]))[0];
+     }
+ }
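
The three new `embeddings/ollama` files above are the same client compiled for CJS, typings, and ESM: a thin wrapper around Ollama's `/api/embeddings` endpoint, exposed through the new `langchain/embeddings/ollama` entrypoint registered in the import-map changes further down. A usage sketch, assuming an Ollama server is already running locally and the model has been pulled (`ollama pull llama2`); run inside an async context:

```ts
import { OllamaEmbeddings } from "langchain/embeddings/ollama";

const embeddings = new OllamaEmbeddings({
  model: "llama2",                   // default shown in the constructor above
  baseUrl: "http://localhost:11434", // default shown in the constructor above
  requestOptions: { useMMap: true }, // camelCase; sent to the API as { use_mmap: true }
});

// One request per input string; failures surface as
// "Request to Ollama server failed: <status> <statusText>".
const queryVector = await embeddings.embedQuery("Hello, world!");
const docVectors = await embeddings.embedDocuments(["first doc", "second doc"]);
console.log(queryVector.length, docVectors.length);
```

Note that `_embed` issues the requests sequentially, one per input string, each wrapped in the base class's retrying `caller`; there is no server-side batching.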
package/dist/experimental/chat_models/anthropic_functions.d.ts
@@ -1,13 +1,10 @@
- import type { OpenAI as OpenAIClient } from "openai";
  import { BaseChatModelParams } from "../../chat_models/base.js";
  import { CallbackManagerForLLMRun } from "../../callbacks/manager.js";
  import { BaseMessage, ChatResult } from "../../schema/index.js";
  import { ChatAnthropic, type AnthropicInput } from "../../chat_models/anthropic.js";
- import { BaseLanguageModelCallOptions } from "../../base_language/index.js";
+ import { BaseFunctionCallOptions } from "../../base_language/index.js";
  import { StructuredTool } from "../../tools/base.js";
- export interface ChatAnthropicFunctionsCallOptions extends BaseLanguageModelCallOptions {
-     function_call?: OpenAIClient.Chat.ChatCompletionCreateParams.FunctionCallOption;
-     functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];
+ export interface ChatAnthropicFunctionsCallOptions extends BaseFunctionCallOptions {
      tools?: StructuredTool[];
  }
  export declare class AnthropicFunctions extends ChatAnthropic<ChatAnthropicFunctionsCallOptions> {
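
The change above drops the locally declared OpenAI-style `function_call` / `functions` fields in favor of the shared `BaseFunctionCallOptions` interface from `base_language`, which now carries them (see the `base_language/index.d.ts` entry in the file list). A hedged sketch of a call using these options; the diff does not show the invocation side, so the entrypoint path and `call` signature here are assumptions:

```ts
import { AnthropicFunctions } from "langchain/experimental/chat_models/anthropic_functions";
import { HumanMessage } from "langchain/schema";

const model = new AnthropicFunctions({ temperature: 0.1 });

// `functions` and `function_call` now come from BaseFunctionCallOptions
// rather than being declared on ChatAnthropicFunctionsCallOptions itself.
const result = await model.call(
  [new HumanMessage("What's the weather like in San Francisco?")],
  {
    functions: [
      {
        name: "get_weather",
        description: "Get the current weather for a city",
        parameters: {
          type: "object",
          properties: { city: { type: "string" } },
          required: ["city"],
        },
      },
    ],
  }
);
```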
package/dist/load/import_constants.cjs
@@ -18,6 +18,7 @@ exports.optionalImportEntrypoints = [
      "langchain/embeddings/cohere",
      "langchain/embeddings/tensorflow",
      "langchain/embeddings/hf",
+     "langchain/embeddings/hf_transformers",
      "langchain/embeddings/googlevertexai",
      "langchain/embeddings/googlepalm",
      "langchain/llms/load",
@@ -87,11 +88,13 @@ exports.optionalImportEntrypoints = [
      "langchain/document_loaders/fs/csv",
      "langchain/document_loaders/fs/notion",
      "langchain/document_loaders/fs/unstructured",
+     "langchain/document_loaders/fs/openai_whisper_audio",
      "langchain/document_transformers/html_to_text",
      "langchain/document_transformers/mozilla_readability",
      "langchain/chat_models/googlevertexai",
      "langchain/chat_models/googlepalm",
      "langchain/sql_db",
+     "langchain/callbacks/handlers/llmonitor",
      "langchain/output_parsers/expression",
      "langchain/retrievers/amazon_kendra",
      "langchain/retrievers/supabase",
package/dist/load/import_constants.js
@@ -15,6 +15,7 @@ export const optionalImportEntrypoints = [
      "langchain/embeddings/cohere",
      "langchain/embeddings/tensorflow",
      "langchain/embeddings/hf",
+     "langchain/embeddings/hf_transformers",
      "langchain/embeddings/googlevertexai",
      "langchain/embeddings/googlepalm",
      "langchain/llms/load",
@@ -84,11 +85,13 @@ export const optionalImportEntrypoints = [
      "langchain/document_loaders/fs/csv",
      "langchain/document_loaders/fs/notion",
      "langchain/document_loaders/fs/unstructured",
+     "langchain/document_loaders/fs/openai_whisper_audio",
      "langchain/document_transformers/html_to_text",
      "langchain/document_transformers/mozilla_readability",
      "langchain/chat_models/googlevertexai",
      "langchain/chat_models/googlepalm",
      "langchain/sql_db",
+     "langchain/callbacks/handlers/llmonitor",
      "langchain/output_parsers/expression",
      "langchain/retrievers/amazon_kendra",
      "langchain/retrievers/supabase",
package/dist/load/import_map.cjs
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
      return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.retrievers__multi_vector = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = void 0;
+ exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+ exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -36,6 +36,7 @@ exports.chains__openai_functions = __importStar(require("../chains/openai_functi
  exports.embeddings__base = __importStar(require("../embeddings/base.cjs"));
  exports.embeddings__cache_backed = __importStar(require("../embeddings/cache_backed.cjs"));
  exports.embeddings__fake = __importStar(require("../embeddings/fake.cjs"));
+ exports.embeddings__ollama = __importStar(require("../embeddings/ollama.cjs"));
  exports.embeddings__openai = __importStar(require("../embeddings/openai.cjs"));
  exports.embeddings__minimax = __importStar(require("../embeddings/minimax.cjs"));
  exports.llms__base = __importStar(require("../llms/base.cjs"));
package/dist/load/import_map.d.ts
@@ -8,6 +8,7 @@ export * as chains__openai_functions from "../chains/openai_functions/index.js";
  export * as embeddings__base from "../embeddings/base.js";
  export * as embeddings__cache_backed from "../embeddings/cache_backed.js";
  export * as embeddings__fake from "../embeddings/fake.js";
+ export * as embeddings__ollama from "../embeddings/ollama.js";
  export * as embeddings__openai from "../embeddings/openai.js";
  export * as embeddings__minimax from "../embeddings/minimax.js";
  export * as llms__base from "../llms/base.js";
package/dist/load/import_map.js
@@ -9,6 +9,7 @@ export * as chains__openai_functions from "../chains/openai_functions/index.js";
  export * as embeddings__base from "../embeddings/base.js";
  export * as embeddings__cache_backed from "../embeddings/cache_backed.js";
  export * as embeddings__fake from "../embeddings/fake.js";
+ export * as embeddings__ollama from "../embeddings/ollama.js";
  export * as embeddings__openai from "../embeddings/openai.js";
  export * as embeddings__minimax from "../embeddings/minimax.js";
  export * as llms__base from "../llms/base.js";
package/dist/prompts/chat.cjs
@@ -233,6 +233,32 @@ class SystemMessagePromptTemplate extends BaseMessageStringPromptTemplate {
      }
  }
  exports.SystemMessagePromptTemplate = SystemMessagePromptTemplate;
+ function _isBaseMessagePromptTemplate(baseMessagePromptTemplateLike) {
+     return (typeof baseMessagePromptTemplateLike
+         .formatMessages === "function");
+ }
+ function _coerceMessagePromptTemplateLike(messagePromptTemplateLike) {
+     if (_isBaseMessagePromptTemplate(messagePromptTemplateLike) ||
+         (0, index_js_1.isBaseMessage)(messagePromptTemplateLike)) {
+         return messagePromptTemplateLike;
+     }
+     const message = (0, index_js_1.coerceMessageLikeToMessage)(messagePromptTemplateLike);
+     if (message._getType() === "human") {
+         return HumanMessagePromptTemplate.fromTemplate(message.content);
+     }
+     else if (message._getType() === "ai") {
+         return AIMessagePromptTemplate.fromTemplate(message.content);
+     }
+     else if (message._getType() === "system") {
+         return SystemMessagePromptTemplate.fromTemplate(message.content);
+     }
+     else if (index_js_1.ChatMessage.isInstance(message)) {
+         return ChatMessagePromptTemplate.fromTemplate(message.content, message.role);
+     }
+     else {
+         throw new Error(`Could not coerce message prompt template from input. Received message type: "${message._getType()}".`);
+     }
+ }
  /**
   * Class that represents a chat prompt. It extends the
   * BaseChatPromptTemplate and uses an array of BaseMessagePromptTemplate
@@ -336,7 +362,7 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
          // eslint-disable-next-line no-instanceof/no-instanceof
          promptMessage instanceof ChatPromptTemplate
              ? promptMessage.promptMessages
-             : [promptMessage]), []);
+             : [_coerceMessagePromptTemplateLike(promptMessage)]), []);
      const flattenedPartialVariables = promptMessages.reduce((acc, promptMessage) =>
          // eslint-disable-next-line no-instanceof/no-instanceof
          promptMessage instanceof ChatPromptTemplate
package/dist/prompts/chat.d.ts
@@ -1,5 +1,5 @@
  import { BaseCallbackConfig } from "../callbacks/manager.js";
- import { BaseMessage, BasePromptValue, InputValues, PartialValues } from "../schema/index.js";
+ import { BaseMessage, BaseMessageLike, BasePromptValue, InputValues, PartialValues } from "../schema/index.js";
  import { Runnable } from "../schema/runnable.js";
  import { BasePromptTemplate, BasePromptTemplateInput, BaseStringPromptTemplate, TypedPromptInputValues } from "./base.js";
  /**
@@ -152,6 +152,7 @@ export interface ChatPromptTemplateInput<RunInput extends InputValues = any, Par
       */
      validateTemplate?: boolean;
  }
+ export type BaseMessagePromptTemplateLike = BaseMessagePromptTemplate | BaseMessageLike;
  /**
   * Class that represents a chat prompt. It extends the
   * BaseChatPromptTemplate and uses an array of BaseMessagePromptTemplate
@@ -168,5 +169,5 @@ export declare class ChatPromptTemplate<RunInput extends InputValues = any, Part
      _getPromptType(): "chat";
      formatMessages(values: TypedPromptInputValues<RunInput>): Promise<BaseMessage[]>;
      partial<NewPartialVariableName extends string>(values: PartialValues<NewPartialVariableName>): Promise<ChatPromptTemplate<InputValues<Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>>, any>>;
-     static fromPromptMessages<RunInput extends InputValues = any>(promptMessages: (BaseMessagePromptTemplate<InputValues> | ChatPromptTemplate<InputValues, string> | BaseMessage)[]): ChatPromptTemplate<RunInput>;
+     static fromPromptMessages<RunInput extends InputValues = any>(promptMessages: (ChatPromptTemplate<InputValues, string> | BaseMessagePromptTemplateLike)[]): ChatPromptTemplate<RunInput>;
  }
package/dist/prompts/chat.js
@@ -1,6 +1,6 @@
  // Default generic "any" values are for backwards compatibility.
  // Replace with "string" when we are comfortable with a breaking change.
- import { AIMessage, BaseMessage, BasePromptValue, ChatMessage, HumanMessage, SystemMessage, } from "../schema/index.js";
+ import { AIMessage, BaseMessage, BasePromptValue, ChatMessage, HumanMessage, SystemMessage, coerceMessageLikeToMessage, isBaseMessage, } from "../schema/index.js";
  import { Runnable } from "../schema/runnable.js";
  import { BasePromptTemplate, } from "./base.js";
  import { PromptTemplate } from "./prompt.js";
@@ -221,6 +221,32 @@ export class SystemMessagePromptTemplate extends BaseMessageStringPromptTemplate
          return new this(PromptTemplate.fromTemplate(template));
      }
  }
+ function _isBaseMessagePromptTemplate(baseMessagePromptTemplateLike) {
+     return (typeof baseMessagePromptTemplateLike
+         .formatMessages === "function");
+ }
+ function _coerceMessagePromptTemplateLike(messagePromptTemplateLike) {
+     if (_isBaseMessagePromptTemplate(messagePromptTemplateLike) ||
+         isBaseMessage(messagePromptTemplateLike)) {
+         return messagePromptTemplateLike;
+     }
+     const message = coerceMessageLikeToMessage(messagePromptTemplateLike);
+     if (message._getType() === "human") {
+         return HumanMessagePromptTemplate.fromTemplate(message.content);
+     }
+     else if (message._getType() === "ai") {
+         return AIMessagePromptTemplate.fromTemplate(message.content);
+     }
+     else if (message._getType() === "system") {
+         return SystemMessagePromptTemplate.fromTemplate(message.content);
+     }
+     else if (ChatMessage.isInstance(message)) {
+         return ChatMessagePromptTemplate.fromTemplate(message.content, message.role);
+     }
+     else {
+         throw new Error(`Could not coerce message prompt template from input. Received message type: "${message._getType()}".`);
+     }
+ }
  /**
   * Class that represents a chat prompt. It extends the
   * BaseChatPromptTemplate and uses an array of BaseMessagePromptTemplate
@@ -324,7 +350,7 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
          // eslint-disable-next-line no-instanceof/no-instanceof
          promptMessage instanceof ChatPromptTemplate
              ? promptMessage.promptMessages
-             : [promptMessage]), []);
+             : [_coerceMessagePromptTemplateLike(promptMessage)]), []);
      const flattenedPartialVariables = promptMessages.reduce((acc, promptMessage) =>
          // eslint-disable-next-line no-instanceof/no-instanceof
          promptMessage instanceof ChatPromptTemplate
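
Taken together, the `prompts/chat.*` changes mean `ChatPromptTemplate.fromPromptMessages` now coerces any `BaseMessagePromptTemplateLike`, so plain `[role, template]` tuples (and bare strings, which coerce to human messages) work alongside explicit prompt-template objects. A sketch of the new call style, with illustrative template variables; run inside an async context:

```ts
import { ChatPromptTemplate } from "langchain/prompts";

// Tuples are coerced via coerceMessageLikeToMessage and then promoted to the
// matching message prompt template, so {name} and {question} stay templated.
const prompt = ChatPromptTemplate.fromPromptMessages([
  ["system", "You are a helpful assistant named {name}."],
  ["human", "{question}"],
]);

const messages = await prompt.formatMessages({
  name: "Polly",
  question: "What is a good name for a parrot?",
});
console.log(messages); // [SystemMessage, HumanMessage] with variables filled in
```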
package/dist/schema/index.cjs
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.ChatMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
+ exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.coerceMessageLikeToMessage = exports.isBaseMessage = exports.ChatMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
  const serializable_js_1 = require("../load/serializable.cjs");
  exports.RUN_KEY = "__run";
  /**
@@ -322,6 +322,49 @@ class ChatMessage extends BaseMessage {
      }
  }
  exports.ChatMessage = ChatMessage;
+ function isBaseMessage(messageLike) {
+     return typeof messageLike._getType === "function";
+ }
+ exports.isBaseMessage = isBaseMessage;
+ function coerceMessageLikeToMessage(messageLike) {
+     if (typeof messageLike === "string") {
+         return new HumanMessage(messageLike);
+     }
+     else if (isBaseMessage(messageLike)) {
+         return messageLike;
+     }
+     let role;
+     let content;
+     let name;
+     if (Array.isArray(messageLike)) {
+         [role, content] = messageLike;
+         name = "";
+     }
+     else {
+         role = messageLike.role;
+         content = messageLike.content;
+         name = messageLike.name;
+     }
+     if (role === "human" || role === "user") {
+         return new HumanMessage({ content });
+     }
+     else if (role === "ai" || role === "assistant") {
+         return new AIMessage({ content });
+     }
+     else if (role === "system") {
+         return new SystemMessage({ content });
+     }
+     else if (role === "function") {
+         if (!name) {
+             throw new Error(`Unable to coerce function message from object: no "name" field provided.`);
+         }
+         return new FunctionMessage({ content, name });
+     }
+     else {
+         return new ChatMessage({ content, role });
+     }
+ }
+ exports.coerceMessageLikeToMessage = coerceMessageLikeToMessage;
  /**
   * Represents a chunk of a chat message, which can be concatenated with
   * other chat message chunks.
package/dist/schema/index.d.ts
@@ -210,6 +210,16 @@ export declare class ChatMessage extends BaseMessage implements ChatMessageField
      _getType(): MessageType;
      static isInstance(message: BaseMessage): message is ChatMessage;
  }
+ export type BaseMessageLike = BaseMessage | {
+     role: MessageType | "user" | "assistant" | (string & Record<never, never>);
+     content: string;
+     name?: string;
+ } | [
+     MessageType | "user" | "assistant" | (string & Record<never, never>),
+     string
+ ] | string;
+ export declare function isBaseMessage(messageLike: BaseMessageLike): messageLike is BaseMessage;
+ export declare function coerceMessageLikeToMessage(messageLike: BaseMessageLike): BaseMessage;
  /**
   * Represents a chunk of a chat message, which can be concatenated with
   * other chat message chunks.
package/dist/schema/index.js
@@ -307,6 +307,47 @@ export class ChatMessage extends BaseMessage {
          return message._getType() === "generic";
      }
  }
+ export function isBaseMessage(messageLike) {
+     return typeof messageLike._getType === "function";
+ }
+ export function coerceMessageLikeToMessage(messageLike) {
+     if (typeof messageLike === "string") {
+         return new HumanMessage(messageLike);
+     }
+     else if (isBaseMessage(messageLike)) {
+         return messageLike;
+     }
+     let role;
+     let content;
+     let name;
+     if (Array.isArray(messageLike)) {
+         [role, content] = messageLike;
+         name = "";
+     }
+     else {
+         role = messageLike.role;
+         content = messageLike.content;
+         name = messageLike.name;
+     }
+     if (role === "human" || role === "user") {
+         return new HumanMessage({ content });
+     }
+     else if (role === "ai" || role === "assistant") {
+         return new AIMessage({ content });
+     }
+     else if (role === "system") {
+         return new SystemMessage({ content });
+     }
+     else if (role === "function") {
+         if (!name) {
+             throw new Error(`Unable to coerce function message from object: no "name" field provided.`);
+         }
+         return new FunctionMessage({ content, name });
+     }
+     else {
+         return new ChatMessage({ content, role });
+     }
+ }
  /**
   * Represents a chunk of a chat message, which can be concatenated with
   * other chat message chunks.
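
For reference, the branch structure of the new `coerceMessageLikeToMessage` maps each accepted `BaseMessageLike` shape onto a concrete message class; a short sketch of each case, based directly on the branches above (the string contents are placeholders):

```ts
import { coerceMessageLikeToMessage, HumanMessage } from "langchain/schema";

coerceMessageLikeToMessage("hi there");                             // -> HumanMessage
coerceMessageLikeToMessage(["ai", "hello"]);                        // -> AIMessage
coerceMessageLikeToMessage({ role: "system", content: "be kind" }); // -> SystemMessage
coerceMessageLikeToMessage(new HumanMessage("already a message"));  // -> returned as-is
coerceMessageLikeToMessage({ role: "critic", content: "too long" }); // -> ChatMessage (unknown role)
// Function messages require a name; the coercion throws without one:
coerceMessageLikeToMessage({ role: "function", content: "42", name: "calc" }); // -> FunctionMessage
```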