langchain 0.0.150 → 0.0.151

This diff shows the changes between two publicly released versions of this package, as published to its public registry. It is provided for informational purposes only.
@@ -0,0 +1,141 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.NIBittensorChatModel = void 0;
+ const base_js_1 = require("../../chat_models/base.cjs");
+ const index_js_1 = require("../../schema/index.cjs");
+ /**
+ * Class representing the Neural Internet chat model powerd by Bittensor, a decentralized network
+ * full of different AI models.s
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+ class NIBittensorChatModel extends base_js_1.BaseChatModel {
+ static lc_name() {
+ return "NIBittensorLLM";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "systemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.systemPrompt =
+ fields?.systemPrompt ??
+ "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+ }
+ _combineLLMOutput() {
+ return [];
+ }
+ _llmType() {
+ return "NIBittensorLLM";
+ }
+ messageToOpenAIRole(message) {
+ const type = message._getType();
+ switch (type) {
+ case "system":
+ return "system";
+ case "ai":
+ return "assistant";
+ case "human":
+ return "user";
+ default:
+ return "user";
+ }
+ }
+ stringToChatMessage(message) {
+ return new index_js_1.ChatMessage(message, "assistant");
+ }
+ /** Call out to NIBittensorChatModel's complete endpoint.
+ Args:
+ messages: The messages to pass into the model.
+
+ Returns: The chat response by the model.
+
+ Example:
+ const chat = new NIBittensorChatModel();
+ const message = new HumanMessage('What is bittensor?');
+ const res = await chat.call([message]);
+ */
+ async _generate(messages) {
+ const processed_messages = messages.map((message) => ({
+ role: this.messageToOpenAIRole(message),
+ content: message.content,
+ }));
+ const generations = [];
+ try {
+ // Retrieve API KEY
+ const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+ if (!apiKeyResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const apiKeysData = await apiKeyResponse.json();
+ const apiKey = apiKeysData[0].api_key;
+ const headers = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ "Endpoint-Version": "2023-05-19",
+ };
+ const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+ if (!minerResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const uids = await minerResponse.json();
+ if (Array.isArray(uids) && uids.length) {
+ for (const uid of uids) {
+ try {
+ const payload = {
+ uids: [uid],
+ messages: [
+ { role: "system", content: this.systemPrompt },
+ ...processed_messages,
+ ],
+ };
+ const response = await fetch("https://test.neuralinternet.ai/chat", {
+ method: "POST",
+ headers,
+ body: JSON.stringify(payload),
+ });
+ if (!response.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const chatData = await response.json();
+ if (chatData.choices) {
+ const generation = {
+ text: chatData.choices[0].message.content,
+ message: this.stringToChatMessage(chatData.choices[0].message.content),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ }
+ catch (error) {
+ continue;
+ }
+ }
+ }
+ }
+ catch (error) {
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ identifyingParams() {
+ return {
+ systemPrompt: this.systemPrompt,
+ };
+ }
+ }
+ exports.NIBittensorChatModel = NIBittensorChatModel;
@@ -0,0 +1,36 @@
+ import { BaseChatModel, BaseChatModelParams } from "../../chat_models/base.js";
+ import { BaseMessage, ChatResult } from "../../schema/index.js";
+ export interface BittensorInput extends BaseChatModelParams {
+ systemPrompt?: string | null | undefined;
+ }
+ /**
+ * Class representing the Neural Internet chat model powerd by Bittensor, a decentralized network
+ * full of different AI models.s
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+ export declare class NIBittensorChatModel extends BaseChatModel implements BittensorInput {
+ static lc_name(): string;
+ systemPrompt: string;
+ constructor(fields?: BittensorInput);
+ _combineLLMOutput(): never[];
+ _llmType(): string;
+ messageToOpenAIRole(message: BaseMessage): "system" | "user" | "assistant";
+ stringToChatMessage(message: string): BaseMessage;
+ /** Call out to NIBittensorChatModel's complete endpoint.
+ Args:
+ messages: The messages to pass into the model.
+
+ Returns: The chat response by the model.
+
+ Example:
+ const chat = new NIBittensorChatModel();
+ const message = new HumanMessage('What is bittensor?');
+ const res = await chat.call([message]);
+ */
+ _generate(messages: BaseMessage[]): Promise<ChatResult>;
+ identifyingParams(): {
+ systemPrompt: string | null | undefined;
+ };
+ }
@@ -0,0 +1,137 @@
+ import { BaseChatModel } from "../../chat_models/base.js";
+ import { ChatMessage, } from "../../schema/index.js";
+ /**
+ * Class representing the Neural Internet chat model powerd by Bittensor, a decentralized network
+ * full of different AI models.s
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+ export class NIBittensorChatModel extends BaseChatModel {
+ static lc_name() {
+ return "NIBittensorLLM";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "systemPrompt", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.systemPrompt =
+ fields?.systemPrompt ??
+ "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+ }
+ _combineLLMOutput() {
+ return [];
+ }
+ _llmType() {
+ return "NIBittensorLLM";
+ }
+ messageToOpenAIRole(message) {
+ const type = message._getType();
+ switch (type) {
+ case "system":
+ return "system";
+ case "ai":
+ return "assistant";
+ case "human":
+ return "user";
+ default:
+ return "user";
+ }
+ }
+ stringToChatMessage(message) {
+ return new ChatMessage(message, "assistant");
+ }
+ /** Call out to NIBittensorChatModel's complete endpoint.
+ Args:
+ messages: The messages to pass into the model.
+
+ Returns: The chat response by the model.
+
+ Example:
+ const chat = new NIBittensorChatModel();
+ const message = new HumanMessage('What is bittensor?');
+ const res = await chat.call([message]);
+ */
+ async _generate(messages) {
+ const processed_messages = messages.map((message) => ({
+ role: this.messageToOpenAIRole(message),
+ content: message.content,
+ }));
+ const generations = [];
+ try {
+ // Retrieve API KEY
+ const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+ if (!apiKeyResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const apiKeysData = await apiKeyResponse.json();
+ const apiKey = apiKeysData[0].api_key;
+ const headers = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ "Endpoint-Version": "2023-05-19",
+ };
+ const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+ if (!minerResponse.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const uids = await minerResponse.json();
+ if (Array.isArray(uids) && uids.length) {
+ for (const uid of uids) {
+ try {
+ const payload = {
+ uids: [uid],
+ messages: [
+ { role: "system", content: this.systemPrompt },
+ ...processed_messages,
+ ],
+ };
+ const response = await fetch("https://test.neuralinternet.ai/chat", {
+ method: "POST",
+ headers,
+ body: JSON.stringify(payload),
+ });
+ if (!response.ok) {
+ throw new Error("Network response was not ok");
+ }
+ const chatData = await response.json();
+ if (chatData.choices) {
+ const generation = {
+ text: chatData.choices[0].message.content,
+ message: this.stringToChatMessage(chatData.choices[0].message.content),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ }
+ catch (error) {
+ continue;
+ }
+ }
+ }
+ }
+ catch (error) {
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ const generation = {
+ text: "Sorry I am unable to provide response now, Please try again later.",
+ message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+ };
+ generations.push(generation);
+ return { generations, llmOutput: {} };
+ }
+ identifyingParams() {
+ return {
+ systemPrompt: this.systemPrompt,
+ };
+ }
+ }
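
The three hunks above are the CommonJS build, the type declarations, and the ESM build of the same new module: an experimental chat model that routes requests through Neural Internet's Bittensor API. Based on the class's own docstring and the entrypoint registered in the export-map hunks further down, a minimal usage sketch (editorial, not part of the diff) might look like:

import { NIBittensorChatModel } from "langchain/experimental/chat_models/bittensor";
import { HumanMessage } from "langchain/schema";

const chat = new NIBittensorChatModel();

// _generate fetches an API key, requests the current top miner UIDs, then
// tries each miner in turn until one returns a completion; if everything
// fails it resolves to the apologetic fallback message rather than throwing.
const res = await chat.call([new HumanMessage("What is Bittensor?")]);
console.log(res.content);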
@@ -56,9 +56,10 @@ class OpenAI extends base_js_1.BaseLLM {
  constructor(fields,
  /** @deprecated */
  configuration) {
- if (fields?.modelName?.startsWith("gpt-3.5-turbo") ||
+ if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
  fields?.modelName?.startsWith("gpt-4") ||
- fields?.modelName?.startsWith("gpt-4-32k")) {
+ fields?.modelName?.startsWith("gpt-4-32k")) &&
+ !fields?.modelName.endsWith("-instruct")) {
  // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
  return new openai_chat_js_1.OpenAIChat(fields, configuration);
  }
@@ -53,9 +53,10 @@ export class OpenAI extends BaseLLM {
  constructor(fields,
  /** @deprecated */
  configuration) {
- if (fields?.modelName?.startsWith("gpt-3.5-turbo") ||
+ if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
  fields?.modelName?.startsWith("gpt-4") ||
- fields?.modelName?.startsWith("gpt-4-32k")) {
+ fields?.modelName?.startsWith("gpt-4-32k")) &&
+ !fields?.modelName.endsWith("-instruct")) {
  // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
  return new OpenAIChat(fields, configuration);
  }
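
These two hunks (the CJS and ESM builds of the OpenAI LLM) tighten the constructor's redirect logic: a chat-prefixed model name now only causes the constructor to return an OpenAIChat instance if the name does not end in "-instruct". The practical effect, sketched below (editorial example; assumes OPENAI_API_KEY is set in the environment):

import { OpenAI } from "langchain/llms/openai";

// "gpt-3.5-turbo-instruct" starts with "gpt-3.5-turbo", so before this patch
// the constructor silently returned an OpenAIChat and the completions-style
// instruct model was sent to the chat endpoint. The new endsWith("-instruct")
// guard keeps it on the plain completions client.
const llm = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const text = await llm.call("Say hello in one word.");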
@@ -48,6 +48,7 @@ exports.optionalImportEntrypoints = [
  "langchain/vectorstores/qdrant",
  "langchain/vectorstores/supabase",
  "langchain/vectorstores/opensearch",
+ "langchain/vectorstores/pgvector",
  "langchain/vectorstores/milvus",
  "langchain/vectorstores/typeorm",
  "langchain/vectorstores/myscale",
@@ -45,6 +45,7 @@ export const optionalImportEntrypoints = [
  "langchain/vectorstores/qdrant",
  "langchain/vectorstores/supabase",
  "langchain/vectorstores/opensearch",
+ "langchain/vectorstores/pgvector",
  "langchain/vectorstores/milvus",
  "langchain/vectorstores/typeorm",
  "langchain/vectorstores/myscale",
@@ -25,7 +25,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = void 0;
+ exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -94,4 +94,5 @@ exports.experimental__autogpt = __importStar(require("../experimental/autogpt/in
  exports.experimental__babyagi = __importStar(require("../experimental/babyagi/index.cjs"));
  exports.experimental__generative_agents = __importStar(require("../experimental/generative_agents/index.cjs"));
  exports.experimental__plan_and_execute = __importStar(require("../experimental/plan_and_execute/index.cjs"));
+ exports.experimental__chat_models__bittensor = __importStar(require("../experimental/chat_models/bittensor.cjs"));
  exports.evaluation = __importStar(require("../evaluation/index.cjs"));
@@ -66,4 +66,5 @@ export * as experimental__autogpt from "../experimental/autogpt/index.js";
  export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
+ export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
  export * as evaluation from "../evaluation/index.js";
@@ -67,4 +67,5 @@ export * as experimental__autogpt from "../experimental/autogpt/index.js";
  export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
+ export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
  export * as evaluation from "../evaluation/index.js";
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.RunnableWithFallbacks = exports.RunnableLambda = exports.RunnableMap = exports.RunnableSequence = exports.RunnableRetry = exports.RunnableEach = exports.RunnableBinding = exports.Runnable = void 0;
+ exports._coerceToRunnable = exports.RunnableWithFallbacks = exports.RunnableLambda = exports.RunnableMap = exports.RunnableSequence = exports.RunnableRetry = exports.RunnableEach = exports.RunnableBinding = exports.Runnable = void 0;
  const p_retry_1 = __importDefault(require("p-retry"));
  const manager_js_1 = require("../../callbacks/manager.cjs");
  const serializable_js_1 = require("../../load/serializable.cjs");
@@ -790,8 +790,15 @@ class RunnableLambda extends Runnable {
  });
  this.func = fields.func;
  }
+ async _invoke(input, config, runManager) {
+ let output = await this.func(input);
+ if (output && Runnable.isRunnable(output)) {
+ output = await output.invoke(input, this._patchConfig(config, runManager?.getChild()));
+ }
+ return output;
+ }
  async invoke(input, options) {
- return this._callWithConfig(async (input) => this.func(input), input, options);
+ return this._callWithConfig(this._invoke, input, options);
  }
  }
  exports.RunnableLambda = RunnableLambda;
@@ -909,3 +916,4 @@ function _coerceToRunnable(coerceable) {
  throw new Error(`Expected a Runnable, function or object.\nInstead got an unsupported type.`);
  }
  }
+ exports._coerceToRunnable = _coerceToRunnable;
@@ -272,6 +272,7 @@ export declare class RunnableLambda<RunInput, RunOutput> extends Runnable<RunInp
  constructor(fields: {
  func: RunnableFunc<RunInput, RunOutput>;
  });
+ _invoke(input: RunInput, config?: Partial<BaseCallbackConfig>, runManager?: CallbackManagerForChainRun): Promise<RunOutput>;
  invoke(input: RunInput, options?: Partial<BaseCallbackConfig>): Promise<RunOutput>;
  }
  /**
@@ -297,4 +298,5 @@ export declare class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable
  }): Promise<(RunOutput | Error)[]>;
  batch(inputs: RunInput[], options?: Partial<BaseCallbackConfig> | Partial<BaseCallbackConfig>[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>;
  }
+ export declare function _coerceToRunnable<RunInput, RunOutput>(coerceable: RunnableLike<RunInput, RunOutput>): Runnable<RunInput, Exclude<RunOutput, Error>>;
  export {};
@@ -778,8 +778,15 @@ export class RunnableLambda extends Runnable {
  });
  this.func = fields.func;
  }
+ async _invoke(input, config, runManager) {
+ let output = await this.func(input);
+ if (output && Runnable.isRunnable(output)) {
+ output = await output.invoke(input, this._patchConfig(config, runManager?.getChild()));
+ }
+ return output;
+ }
  async invoke(input, options) {
- return this._callWithConfig(async (input) => this.func(input), input, options);
+ return this._callWithConfig(this._invoke, input, options);
  }
  }
  /**
@@ -875,7 +882,7 @@ export class RunnableWithFallbacks extends Runnable {
  }
  }
  // TODO: Figure out why the compiler needs help eliminating Error as a RunOutput type
- function _coerceToRunnable(coerceable) {
+ export function _coerceToRunnable(coerceable) {
  if (typeof coerceable === "function") {
  return new RunnableLambda({ func: coerceable });
  }
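
The RunnableLambda hunks (CJS, declarations, ESM) move the lambda body into an _invoke helper that unwraps a returned Runnable: if the wrapped function returns another Runnable, it is invoked on the same input, with callbacks threaded through _patchConfig. The _coerceToRunnable export exists so the new RunnableBranch module below can reuse it. A small behavioral sketch (editorial, not part of the diff):

import { RunnableLambda } from "langchain/schema/runnable";

const double = new RunnableLambda({ func: (x) => x * 2 });

// The outer lambda returns another Runnable; the new _invoke detects this via
// Runnable.isRunnable and invokes it on the same input, so the result is 8
// rather than the Runnable object itself.
const router = new RunnableLambda({ func: () => double });

console.log(await router.invoke(4)); // 8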
@@ -0,0 +1,106 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.RunnableBranch = void 0;
+ const base_js_1 = require("./base.cjs");
+ /**
+ * Class that represents a runnable branch. The RunnableBranch is
+ * initialized with an array of branches and a default branch. When invoked,
+ * it evaluates the condition of each branch in order and executes the
+ * corresponding branch if the condition is true. If none of the conditions
+ * are true, it executes the default branch.
+ */
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ class RunnableBranch extends base_js_1.Runnable {
+ static lc_name() {
+ return "RunnableBranch";
+ }
+ constructor(fields) {
+ super(fields);
+ Object.defineProperty(this, "lc_namespace", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: ["langchain", "runnable", "branch"]
+ });
+ Object.defineProperty(this, "lc_serializable", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: true
+ });
+ Object.defineProperty(this, "default", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "branches", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.branches = fields.branches;
+ this.default = fields.default;
+ }
+ /**
+ * Convenience method for instantiating a RunnableBranch from
+ * RunnableLikes (objects, functions, or Runnables).
+ *
+ * Each item in the input except for the last one should be a
+ * tuple with two items. The first is a "condition" RunnableLike that
+ * returns "true" if the second RunnableLike in the tuple should run.
+ *
+ * The final item in the input should be a RunnableLike that acts as a
+ * default branch if no other branches match.
+ *
+ * @example
+ * ```ts
+ * import { RunnableBranch } from "langchain/schema/runnable";
+ *
+ * const branch = RunnableBranch.from([
+ * [(x: number) => x > 0, (x: number) => x + 1],
+ * [(x: number) => x < 0, (x: number) => x - 1],
+ * (x: number) => x
+ * ]);
+ * ```
+ * @param branches An array where the every item except the last is a tuple of [condition, runnable]
+ * pairs. The last item is a default runnable which is invoked if no other condition matches.
+ * @returns A new RunnableBranch.
+ */
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ static from(branches) {
+ if (branches.length < 1) {
+ throw new Error("RunnableBranch requires at least one branch");
+ }
+ const branchLikes = branches.slice(0, -1);
+ const coercedBranches = branchLikes.map(([condition, runnable]) => [
+ (0, base_js_1._coerceToRunnable)(condition),
+ (0, base_js_1._coerceToRunnable)(runnable),
+ ]);
+ const defaultBranch = (0, base_js_1._coerceToRunnable)(branches[branches.length - 1]);
+ return new this({
+ branches: coercedBranches,
+ default: defaultBranch,
+ });
+ }
+ async _invoke(input, config, runManager) {
+ let result;
+ for (let i = 0; i < this.branches.length; i += 1) {
+ const [condition, branchRunnable] = this.branches[i];
+ const conditionValue = await condition.invoke(input, this._patchConfig(config, runManager?.getChild(`condition:${i + 1}`)));
+ if (conditionValue) {
+ result = await branchRunnable.invoke(input, this._patchConfig(config, runManager?.getChild(`branch:${i + 1}`)));
+ break;
+ }
+ }
+ if (!result) {
+ result = await this.default.invoke(input, this._patchConfig(config, runManager?.getChild("default")));
+ }
+ return result;
+ }
+ async invoke(input, config = {}) {
+ return this._callWithConfig(this._invoke, input, config);
+ }
+ }
+ exports.RunnableBranch = RunnableBranch;
@@ -0,0 +1,66 @@
+ import { Runnable, RunnableLike } from "./base.js";
+ import { RunnableConfig } from "./config.js";
+ import { CallbackManagerForChainRun } from "../../callbacks/manager.js";
+ /**
+ * Type for a branch in the RunnableBranch. It consists of a condition
+ * runnable and a branch runnable. The condition runnable is used to
+ * determine whether the branch should be executed, and the branch runnable
+ * is executed if the condition is true.
+ */
+ export type Branch<RunInput, RunOutput> = [
+ Runnable<RunInput, boolean>,
+ Runnable<RunInput, RunOutput>
+ ];
+ export type BranchLike<RunInput, RunOutput> = [
+ RunnableLike<RunInput, boolean>,
+ RunnableLike<RunInput, RunOutput>
+ ];
+ /**
+ * Class that represents a runnable branch. The RunnableBranch is
+ * initialized with an array of branches and a default branch. When invoked,
+ * it evaluates the condition of each branch in order and executes the
+ * corresponding branch if the condition is true. If none of the conditions
+ * are true, it executes the default branch.
+ */
+ export declare class RunnableBranch<RunInput = any, RunOutput = any> extends Runnable<RunInput, RunOutput> {
+ static lc_name(): string;
+ lc_namespace: string[];
+ lc_serializable: boolean;
+ default: Runnable<RunInput, RunOutput>;
+ branches: Branch<RunInput, RunOutput>[];
+ constructor(fields: {
+ branches: Branch<RunInput, RunOutput>[];
+ default: Runnable<RunInput, RunOutput>;
+ });
+ /**
+ * Convenience method for instantiating a RunnableBranch from
+ * RunnableLikes (objects, functions, or Runnables).
+ *
+ * Each item in the input except for the last one should be a
+ * tuple with two items. The first is a "condition" RunnableLike that
+ * returns "true" if the second RunnableLike in the tuple should run.
+ *
+ * The final item in the input should be a RunnableLike that acts as a
+ * default branch if no other branches match.
+ *
+ * @example
+ * ```ts
+ * import { RunnableBranch } from "langchain/schema/runnable";
+ *
+ * const branch = RunnableBranch.from([
+ * [(x: number) => x > 0, (x: number) => x + 1],
+ * [(x: number) => x < 0, (x: number) => x - 1],
+ * (x: number) => x
+ * ]);
+ * ```
+ * @param branches An array where the every item except the last is a tuple of [condition, runnable]
+ * pairs. The last item is a default runnable which is invoked if no other condition matches.
+ * @returns A new RunnableBranch.
+ */
+ static from<RunInput = any, RunOutput = any>(branches: [
+ ...BranchLike<RunInput, RunOutput>[],
+ RunnableLike<RunInput, RunOutput>
+ ]): RunnableBranch<RunInput, RunOutput>;
+ _invoke(input: RunInput, config?: Partial<RunnableConfig>, runManager?: CallbackManagerForChainRun): Promise<RunOutput>;
+ invoke(input: RunInput, config?: RunnableConfig): Promise<RunOutput>;
+ }
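
Finally, the new RunnableBranch (CJS build plus declarations) adds conditional routing: conditions are evaluated in order, the first truthy one selects its branch, and the trailing bare RunnableLike is the default. Expanding the class's own jsdoc example into a runnable sketch (editorial, not part of the diff):

import { RunnableBranch } from "langchain/schema/runnable";

const branch = RunnableBranch.from([
  [(x) => x > 0, (x) => x + 1],
  [(x) => x < 0, (x) => x - 1],
  (x) => x,
]);

console.log(await branch.invoke(5));  // 6  (first condition matches)
console.log(await branch.invoke(-5)); // -6 (second condition matches)
console.log(await branch.invoke(0));  // 0  (default branch)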