langchain 0.0.149 → 0.0.151

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/experimental/chat_models/bittensor.cjs +141 -0
  2. package/dist/experimental/chat_models/bittensor.d.ts +36 -0
  3. package/dist/experimental/chat_models/bittensor.js +137 -0
  4. package/dist/llms/openai.cjs +3 -2
  5. package/dist/llms/openai.js +3 -2
  6. package/dist/llms/replicate.cjs +28 -2
  7. package/dist/llms/replicate.d.ts +3 -0
  8. package/dist/llms/replicate.js +28 -2
  9. package/dist/load/import_constants.cjs +1 -0
  10. package/dist/load/import_constants.js +1 -0
  11. package/dist/load/import_map.cjs +2 -1
  12. package/dist/load/import_map.d.ts +1 -0
  13. package/dist/load/import_map.js +1 -0
  14. package/dist/prompts/prompt.cjs +2 -0
  15. package/dist/prompts/prompt.d.ts +1 -1
  16. package/dist/prompts/prompt.js +2 -0
  17. package/dist/schema/runnable/base.cjs +11 -2
  18. package/dist/schema/runnable/base.d.ts +3 -1
  19. package/dist/schema/runnable/base.js +10 -2
  20. package/dist/schema/runnable/branch.cjs +106 -0
  21. package/dist/schema/runnable/branch.d.ts +66 -0
  22. package/dist/schema/runnable/branch.js +102 -0
  23. package/dist/schema/runnable/index.cjs +12 -16
  24. package/dist/schema/runnable/index.d.ts +2 -1
  25. package/dist/schema/runnable/index.js +2 -1
  26. package/dist/vectorstores/pgvector.cjs +277 -0
  27. package/dist/vectorstores/pgvector.d.ts +132 -0
  28. package/dist/vectorstores/pgvector.js +270 -0
  29. package/experimental/chat_models/bittensor.cjs +1 -0
  30. package/experimental/chat_models/bittensor.d.ts +1 -0
  31. package/experimental/chat_models/bittensor.js +1 -0
  32. package/package.json +19 -3
  33. package/vectorstores/pgvector.cjs +1 -0
  34. package/vectorstores/pgvector.d.ts +1 -0
  35. package/vectorstores/pgvector.js +1 -0
@@ -0,0 +1,141 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.NIBittensorChatModel = void 0;
+ const base_js_1 = require("../../chat_models/base.cjs");
+ const index_js_1 = require("../../schema/index.cjs");
+ /**
+ * Class representing the Neural Internet chat model powered by Bittensor, a decentralized network
+ * full of different AI models.
+ * To analyze API_KEYS and logs of your usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+ class NIBittensorChatModel extends base_js_1.BaseChatModel {
+ static lc_name() {
+ return "NIBittensorLLM";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "systemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.systemPrompt =
+ fields?.systemPrompt ??
+ "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+ }
+ _combineLLMOutput() {
+ return [];
+ }
+ _llmType() {
+ return "NIBittensorLLM";
+ }
+ messageToOpenAIRole(message) {
+ const type = message._getType();
+ switch (type) {
+ case "system":
+ return "system";
+ case "ai":
+ return "assistant";
+ case "human":
+ return "user";
+ default:
+ return "user";
+ }
+ }
+ stringToChatMessage(message) {
+ return new index_js_1.ChatMessage(message, "assistant");
+ }
+ /** Call out to NIBittensorChatModel's complete endpoint.
+ Args:
+ messages: The messages to pass into the model.
+
+ Returns: The chat response by the model.
+
+ Example:
+ const chat = new NIBittensorChatModel();
+ const message = new HumanMessage('What is bittensor?');
+ const res = await chat.call([message]);
+ */
+ async _generate(messages) {
+ const processed_messages = messages.map((message) => ({
+ role: this.messageToOpenAIRole(message),
+ content: message.content,
+ }));
+ const generations = [];
+ try {
+ // Retrieve API KEY
+ const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+ if (!apiKeyResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const apiKeysData = await apiKeyResponse.json();
+ const apiKey = apiKeysData[0].api_key;
+ const headers = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ "Endpoint-Version": "2023-05-19",
+ };
+ const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+ if (!minerResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const uids = await minerResponse.json();
+ if (Array.isArray(uids) && uids.length) {
+ for (const uid of uids) {
+ try {
+ const payload = {
+ uids: [uid],
+ messages: [
+ { role: "system", content: this.systemPrompt },
+ ...processed_messages,
+ ],
+ };
+ const response = await fetch("https://test.neuralinternet.ai/chat", {
+ method: "POST",
+ headers,
+ body: JSON.stringify(payload),
+ });
+ if (!response.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const chatData = await response.json();
+ if (chatData.choices) {
+ const generation = {
+ text: chatData.choices[0].message.content,
+ message: this.stringToChatMessage(chatData.choices[0].message.content),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ }
+ catch (error) {
+ continue;
+ }
+ }
+ }
+ }
+ catch (error) {
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ identifyingParams() {
+ return {
+ systemPrompt: this.systemPrompt,
+ };
+ }
+ }
+ exports.NIBittensorChatModel = NIBittensorChatModel;
@@ -0,0 +1,36 @@
+ import { BaseChatModel, BaseChatModelParams } from "../../chat_models/base.js";
+ import { BaseMessage, ChatResult } from "../../schema/index.js";
+ export interface BittensorInput extends BaseChatModelParams {
+ systemPrompt?: string | null | undefined;
+ }
+ /**
+ * Class representing the Neural Internet chat model powered by Bittensor, a decentralized network
+ * full of different AI models.
+ * To analyze API_KEYS and logs of your usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+ export declare class NIBittensorChatModel extends BaseChatModel implements BittensorInput {
+ static lc_name(): string;
+ systemPrompt: string;
+ constructor(fields?: BittensorInput);
+ _combineLLMOutput(): never[];
+ _llmType(): string;
+ messageToOpenAIRole(message: BaseMessage): "system" | "user" | "assistant";
+ stringToChatMessage(message: string): BaseMessage;
+ /** Call out to NIBittensorChatModel's complete endpoint.
+ Args:
+ messages: The messages to pass into the model.
+
+ Returns: The chat response by the model.
+
+ Example:
+ const chat = new NIBittensorChatModel();
+ const message = new HumanMessage('What is bittensor?');
+ const res = await chat.call([message]);
+ */
+ _generate(messages: BaseMessage[]): Promise<ChatResult>;
+ identifyingParams(): {
+ systemPrompt: string | null | undefined;
+ };
+ }
@@ -0,0 +1,137 @@
+ import { BaseChatModel } from "../../chat_models/base.js";
+ import { ChatMessage, } from "../../schema/index.js";
+ /**
+ * Class representing the Neural Internet chat model powered by Bittensor, a decentralized network
+ * full of different AI models.
+ * To analyze API_KEYS and logs of your usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+ export class NIBittensorChatModel extends BaseChatModel {
+ static lc_name() {
+ return "NIBittensorLLM";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "systemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.systemPrompt =
+ fields?.systemPrompt ??
+ "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+ }
+ _combineLLMOutput() {
+ return [];
+ }
+ _llmType() {
+ return "NIBittensorLLM";
+ }
+ messageToOpenAIRole(message) {
+ const type = message._getType();
+ switch (type) {
+ case "system":
+ return "system";
+ case "ai":
+ return "assistant";
+ case "human":
+ return "user";
+ default:
+ return "user";
+ }
+ }
+ stringToChatMessage(message) {
+ return new ChatMessage(message, "assistant");
+ }
+ /** Call out to NIBittensorChatModel's complete endpoint.
+ Args:
+ messages: The messages to pass into the model.
+
+ Returns: The chat response by the model.
+
+ Example:
+ const chat = new NIBittensorChatModel();
+ const message = new HumanMessage('What is bittensor?');
+ const res = await chat.call([message]);
+ */
+ async _generate(messages) {
+ const processed_messages = messages.map((message) => ({
+ role: this.messageToOpenAIRole(message),
+ content: message.content,
+ }));
+ const generations = [];
+ try {
+ // Retrieve API KEY
+ const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+ if (!apiKeyResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const apiKeysData = await apiKeyResponse.json();
+ const apiKey = apiKeysData[0].api_key;
+ const headers = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ "Endpoint-Version": "2023-05-19",
+ };
+ const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+ if (!minerResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const uids = await minerResponse.json();
+ if (Array.isArray(uids) && uids.length) {
+ for (const uid of uids) {
+ try {
+ const payload = {
+ uids: [uid],
+ messages: [
+ { role: "system", content: this.systemPrompt },
+ ...processed_messages,
+ ],
+ };
+ const response = await fetch("https://test.neuralinternet.ai/chat", {
+ method: "POST",
+ headers,
+ body: JSON.stringify(payload),
+ });
+ if (!response.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const chatData = await response.json();
+ if (chatData.choices) {
+ const generation = {
+ text: chatData.choices[0].message.content,
+ message: this.stringToChatMessage(chatData.choices[0].message.content),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ }
+ catch (error) {
+ continue;
+ }
+ }
+ }
+ }
+ catch (error) {
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ identifyingParams() {
+ return {
+ systemPrompt: this.systemPrompt,
+ };
+ }
+ }
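
The docstring's example, expanded into a minimal usage sketch; the `langchain/experimental/chat_models/bittensor` import path is inferred from the new entrypoint files listed above:

import { NIBittensorChatModel } from "langchain/experimental/chat_models/bittensor";
import { HumanMessage } from "langchain/schema";

// No API key configuration is needed here: _generate above fetches a key
// from the Neural Internet test endpoint at call time.
const chat = new NIBittensorChatModel();
const res = await chat.call([new HumanMessage("What is bittensor?")]);
console.log(res.content);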
@@ -56,9 +56,10 @@ class OpenAI extends base_js_1.BaseLLM {
  constructor(fields,
  /** @deprecated */
  configuration) {
- if (fields?.modelName?.startsWith("gpt-3.5-turbo") ||
+ if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
  fields?.modelName?.startsWith("gpt-4") ||
- fields?.modelName?.startsWith("gpt-4-32k")) {
+ fields?.modelName?.startsWith("gpt-4-32k")) &&
+ !fields?.modelName.endsWith("-instruct")) {
  // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
  return new openai_chat_js_1.OpenAIChat(fields, configuration);
  }
@@ -53,9 +53,10 @@ export class OpenAI extends BaseLLM {
  constructor(fields,
  /** @deprecated */
  configuration) {
- if (fields?.modelName?.startsWith("gpt-3.5-turbo") ||
+ if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
  fields?.modelName?.startsWith("gpt-4") ||
- fields?.modelName?.startsWith("gpt-4-32k")) {
+ fields?.modelName?.startsWith("gpt-4-32k")) &&
+ !fields?.modelName.endsWith("-instruct")) {
  // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
  return new OpenAIChat(fields, configuration);
  }
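
With this change, model names that match a chat prefix but end in "-instruct" stay on the completions-style OpenAI LLM instead of being rerouted to OpenAIChat. A sketch of the effect:

import { OpenAI } from "langchain/llms/openai";

// Still silently constructs an OpenAIChat under the hood, as before:
const chatBacked = new OpenAI({ modelName: "gpt-3.5-turbo" });

// New: instruct variants are no longer rerouted and keep using the
// plain completions endpoint.
const instruct = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });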
@@ -44,6 +44,12 @@ class Replicate extends base_js_1.LLM {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "promptKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  const apiKey = fields?.apiKey ??
  (0, env_js_1.getEnvironmentVariable)("REPLICATE_API_KEY") ?? // previous environment variable for backwards compatibility
  (0, env_js_1.getEnvironmentVariable)("REPLICATE_API_TOKEN"); // current environment variable, matching the Python library
@@ -53,6 +59,7 @@ class Replicate extends base_js_1.LLM {
  this.apiKey = apiKey;
  this.model = fields.model;
  this.input = fields.input ?? {};
+ this.promptKey = fields.promptKey;
  }
  _llmType() {
  return "replicate";
@@ -64,11 +71,30 @@ class Replicate extends base_js_1.LLM {
  userAgent: "langchain",
  auth: this.apiKey,
  });
+ if (this.promptKey === undefined) {
+ const [modelString, versionString] = this.model.split(":");
+ const version = await replicate.models.versions.get(modelString.split("/")[0], modelString.split("/")[1], versionString);
+ const openapiSchema = version.openapi_schema;
+ const inputProperties =
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ openapiSchema?.components?.schemas?.Input?.properties;
+ if (inputProperties === undefined) {
+ this.promptKey = "prompt";
+ }
+ else {
+ const sortedInputProperties = Object.entries(inputProperties).sort(([_keyA, valueA], [_keyB, valueB]) => {
+ const orderA = valueA["x-order"] || 0;
+ const orderB = valueB["x-order"] || 0;
+ return orderA - orderB;
+ });
+ this.promptKey = sortedInputProperties[0][0] ?? "prompt";
+ }
+ }
  const output = await this.caller.callWithOptions({ signal: options.signal }, () => replicate.run(this.model, {
- wait: true,
  input: {
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+ [this.promptKey]: prompt,
  ...this.input,
- prompt,
  },
  }));
  if (typeof output === "string") {
@@ -10,6 +10,8 @@ export interface ReplicateInput {
  [key: string]: string | number | boolean;
  };
  apiKey?: string;
+ /** The key used to pass prompts to the model. */
+ promptKey?: string;
  }
  /**
  * Class responsible for managing the interaction with the Replicate API.
@@ -26,6 +28,7 @@ export declare class Replicate extends LLM implements ReplicateInput {
  model: ReplicateInput["model"];
  input: ReplicateInput["input"];
  apiKey: string;
+ promptKey?: string;
  constructor(fields: ReplicateInput & BaseLLMParams);
  _llmType(): string;
  /** @ignore */
@@ -41,6 +41,12 @@ export class Replicate extends LLM {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "promptKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  const apiKey = fields?.apiKey ??
  getEnvironmentVariable("REPLICATE_API_KEY") ?? // previous environment variable for backwards compatibility
  getEnvironmentVariable("REPLICATE_API_TOKEN"); // current environment variable, matching the Python library
@@ -50,6 +56,7 @@ export class Replicate extends LLM {
  this.apiKey = apiKey;
  this.model = fields.model;
  this.input = fields.input ?? {};
+ this.promptKey = fields.promptKey;
  }
  _llmType() {
  return "replicate";
@@ -61,11 +68,30 @@ export class Replicate extends LLM {
  userAgent: "langchain",
  auth: this.apiKey,
  });
+ if (this.promptKey === undefined) {
+ const [modelString, versionString] = this.model.split(":");
+ const version = await replicate.models.versions.get(modelString.split("/")[0], modelString.split("/")[1], versionString);
+ const openapiSchema = version.openapi_schema;
+ const inputProperties =
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ openapiSchema?.components?.schemas?.Input?.properties;
+ if (inputProperties === undefined) {
+ this.promptKey = "prompt";
+ }
+ else {
+ const sortedInputProperties = Object.entries(inputProperties).sort(([_keyA, valueA], [_keyB, valueB]) => {
+ const orderA = valueA["x-order"] || 0;
+ const orderB = valueB["x-order"] || 0;
+ return orderA - orderB;
+ });
+ this.promptKey = sortedInputProperties[0][0] ?? "prompt";
+ }
+ }
  const output = await this.caller.callWithOptions({ signal: options.signal }, () => replicate.run(this.model, {
- wait: true,
  input: {
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+ [this.promptKey]: prompt,
  ...this.input,
- prompt,
  },
  }));
  if (typeof output === "string") {
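
The new promptKey option names the input field that receives the prompt; when it is omitted, the lookup above reads the model version's OpenAPI Input schema and picks the first property by x-order, falling back to "prompt". A usage sketch ("owner/model:version" is a placeholder, not a real Replicate model ID):

import { Replicate } from "langchain/llms/replicate";

const llm = new Replicate({
  model: "owner/model:version", // placeholder: use a real Replicate model ID
  // Optional: set promptKey explicitly to skip the schema lookup,
  // e.g. for models whose prompt field is not named "prompt".
  promptKey: "prompt",
});
const text = await llm.call("Say hello.");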
@@ -48,6 +48,7 @@ exports.optionalImportEntrypoints = [
  "langchain/vectorstores/qdrant",
  "langchain/vectorstores/supabase",
  "langchain/vectorstores/opensearch",
+ "langchain/vectorstores/pgvector",
  "langchain/vectorstores/milvus",
  "langchain/vectorstores/typeorm",
  "langchain/vectorstores/myscale",
@@ -45,6 +45,7 @@ export const optionalImportEntrypoints = [
  "langchain/vectorstores/qdrant",
  "langchain/vectorstores/supabase",
  "langchain/vectorstores/opensearch",
+ "langchain/vectorstores/pgvector",
  "langchain/vectorstores/milvus",
  "langchain/vectorstores/typeorm",
  "langchain/vectorstores/myscale",
@@ -25,7 +25,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = void 0;
+ exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -94,4 +94,5 @@ exports.experimental__autogpt = __importStar(require("../experimental/autogpt/in
  exports.experimental__babyagi = __importStar(require("../experimental/babyagi/index.cjs"));
  exports.experimental__generative_agents = __importStar(require("../experimental/generative_agents/index.cjs"));
  exports.experimental__plan_and_execute = __importStar(require("../experimental/plan_and_execute/index.cjs"));
+ exports.experimental__chat_models__bittensor = __importStar(require("../experimental/chat_models/bittensor.cjs"));
  exports.evaluation = __importStar(require("../evaluation/index.cjs"));
@@ -66,4 +66,5 @@ export * as experimental__autogpt from "../experimental/autogpt/index.js";
  export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
+ export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
  export * as evaluation from "../evaluation/index.js";
@@ -67,4 +67,5 @@ export * as experimental__autogpt from "../experimental/autogpt/index.js";
  export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
+ export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
  export * as evaluation from "../evaluation/index.js";
@@ -99,6 +99,8 @@ class PromptTemplate extends base_js_1.BaseStringPromptTemplate {
  }
  });
  return new PromptTemplate({
+ // Rely on extracted types
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
  inputVariables: [...names],
  templateFormat,
  template,
@@ -32,7 +32,7 @@ type NonAlphanumeric = " " | "\t" | "\n" | "\r" | '"' | "'" | "{" | "[" | "(" |
  */
  type ExtractTemplateParamsRecursive<T extends string, Result extends string[] = []> = T extends `${string}{${infer Param}}${infer Rest}` ? Param extends `${NonAlphanumeric}${string}` ? ExtractTemplateParamsRecursive<Rest, Result> : ExtractTemplateParamsRecursive<Rest, [...Result, Param]> : Result;
  export type ParamsFromFString<T extends string> = {
- [Key in ExtractTemplateParamsRecursive<T>[number]]: string;
+ [Key in ExtractTemplateParamsRecursive<T>[number] | (string & Record<never, never>)]: string;
  };
  /**
  * Schema to represent a basic prompt for an LLM.
@@ -96,6 +96,8 @@ export class PromptTemplate extends BaseStringPromptTemplate {
  }
  });
  return new PromptTemplate({
+ // Rely on extracted types
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
  inputVariables: [...names],
  templateFormat,
  template,
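
The ParamsFromFString change widens the mapped key set with `string & Record<never, never>`, a common trick that keeps the extracted parameter names in editor suggestions while also admitting arbitrary string keys. A sketch of the assumed effect on callers:

import { PromptTemplate } from "langchain/prompts";

const prompt = PromptTemplate.fromTemplate("Tell me a {adjective} joke about {content}.");

// "adjective" and "content" are still inferred from the template string;
// extra keys (here "audience", ignored at runtime) now also type-check,
// which is the assumed intent of the widened mapped type.
await prompt.format({ adjective: "funny", content: "bears", audience: "kids" });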
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.RunnableWithFallbacks = exports.RunnableLambda = exports.RunnableMap = exports.RunnableSequence = exports.RunnableRetry = exports.RunnableEach = exports.RunnableBinding = exports.Runnable = void 0;
+ exports._coerceToRunnable = exports.RunnableWithFallbacks = exports.RunnableLambda = exports.RunnableMap = exports.RunnableSequence = exports.RunnableRetry = exports.RunnableEach = exports.RunnableBinding = exports.Runnable = void 0;
  const p_retry_1 = __importDefault(require("p-retry"));
  const manager_js_1 = require("../../callbacks/manager.cjs");
  const serializable_js_1 = require("../../load/serializable.cjs");
@@ -700,6 +700,7 @@ class RunnableSequence extends Runnable {
  static isRunnableSequence(thing) {
  return Array.isArray(thing.middle) && Runnable.isRunnable(thing);
  }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static from([first, ...runnables]) {
  return new RunnableSequence({
  first: _coerceToRunnable(first),
@@ -789,8 +790,15 @@ class RunnableLambda extends Runnable {
  });
  this.func = fields.func;
  }
+ async _invoke(input, config, runManager) {
+ let output = await this.func(input);
+ if (output && Runnable.isRunnable(output)) {
+ output = await output.invoke(input, this._patchConfig(config, runManager?.getChild()));
+ }
+ return output;
+ }
  async invoke(input, options) {
- return this._callWithConfig(async (input) => this.func(input), input, options);
+ return this._callWithConfig(this._invoke, input, options);
  }
  }
  exports.RunnableLambda = RunnableLambda;
@@ -908,3 +916,4 @@ function _coerceToRunnable(coerceable) {
  throw new Error(`Expected a Runnable, function or object.\nInstead got an unsupported type.`);
  }
  }
+ exports._coerceToRunnable = _coerceToRunnable;
@@ -242,7 +242,7 @@ export declare class RunnableSequence<RunInput = any, RunOutput = any> extends R
  _streamIterator(input: RunInput, options?: RunnableConfig): AsyncGenerator<RunOutput>;
  pipe<NewRunOutput>(coerceable: RunnableLike<RunOutput, NewRunOutput>): RunnableSequence<RunInput, Exclude<NewRunOutput, Error>>;
  static isRunnableSequence(thing: any): thing is RunnableSequence;
- static from<RunInput, RunOutput>([first, ...runnables]: [
+ static from<RunInput = any, RunOutput = any>([first, ...runnables]: [
  RunnableLike<RunInput>,
  ...RunnableLike[],
  RunnableLike<any, RunOutput>
@@ -272,6 +272,7 @@ export declare class RunnableLambda<RunInput, RunOutput> extends Runnable<RunInp
  constructor(fields: {
  func: RunnableFunc<RunInput, RunOutput>;
  });
+ _invoke(input: RunInput, config?: Partial<BaseCallbackConfig>, runManager?: CallbackManagerForChainRun): Promise<RunOutput>;
  invoke(input: RunInput, options?: Partial<BaseCallbackConfig>): Promise<RunOutput>;
  }
  /**
@@ -297,4 +298,5 @@ export declare class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable
  }): Promise<(RunOutput | Error)[]>;
  batch(inputs: RunInput[], options?: Partial<BaseCallbackConfig> | Partial<BaseCallbackConfig>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
  }
+ export declare function _coerceToRunnable<RunInput, RunOutput>(coerceable: RunnableLike<RunInput, RunOutput>): Runnable<RunInput, Exclude<RunOutput, Error>>;
  export {};
@@ -690,6 +690,7 @@ export class RunnableSequence extends Runnable {
  static isRunnableSequence(thing) {
  return Array.isArray(thing.middle) && Runnable.isRunnable(thing);
  }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static from([first, ...runnables]) {
  return new RunnableSequence({
  first: _coerceToRunnable(first),
@@ -777,8 +778,15 @@ export class RunnableLambda extends Runnable {
  });
  this.func = fields.func;
  }
+ async _invoke(input, config, runManager) {
+ let output = await this.func(input);
+ if (output && Runnable.isRunnable(output)) {
+ output = await output.invoke(input, this._patchConfig(config, runManager?.getChild()));
+ }
+ return output;
+ }
  async invoke(input, options) {
- return this._callWithConfig(async (input) => this.func(input), input, options);
+ return this._callWithConfig(this._invoke, input, options);
  }
  }
  /**
@@ -874,7 +882,7 @@ export class RunnableWithFallbacks extends Runnable {
  }
  }
  // TODO: Figure out why the compiler needs help eliminating Error as a RunOutput type
- function _coerceToRunnable(coerceable) {
+ export function _coerceToRunnable(coerceable) {
  if (typeof coerceable === "function") {
  return new RunnableLambda({ func: coerceable });
  }
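
Taken together, the runnable changes export _coerceToRunnable for reuse and let a RunnableLambda's function return another Runnable, which the new _invoke detects and runs on the same input under a child run config. A minimal sketch of the new lambda behavior; the two handler lambdas are illustrative stand-ins for real chains:

import { RunnableLambda } from "langchain/schema/runnable";

// Two trivial runnables acting as stand-ins for real chains.
const mathChain = new RunnableLambda({
  func: (input: { question: string }) => `math: ${input.question}`,
});
const otherChain = new RunnableLambda({
  func: (input: { question: string }) => `general: ${input.question}`,
});

// New: returning a Runnable from the lambda routes the same input into it,
// instead of handing back the Runnable object itself.
const router = new RunnableLambda({
  func: (input: { question: string }) =>
    input.question.includes("+") ? mathChain : otherChain,
});

console.log(await router.invoke({ question: "What is 2 + 2?" })); // "math: What is 2 + 2?"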