langchain 0.0.71 → 0.0.73
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/index.cjs +2 -1
- package/dist/agents/index.d.ts +1 -1
- package/dist/agents/index.js +1 -1
- package/dist/chains/base.cjs +1 -1
- package/dist/chains/base.js +1 -1
- package/dist/chains/conversation.cjs +3 -3
- package/dist/chains/conversation.d.ts +1 -0
- package/dist/chains/conversation.js +2 -2
- package/dist/chains/index.cjs +10 -1
- package/dist/chains/index.d.ts +4 -0
- package/dist/chains/index.js +4 -0
- package/dist/chains/retrieval_qa.cjs +3 -1
- package/dist/chains/retrieval_qa.d.ts +2 -1
- package/dist/chains/retrieval_qa.js +4 -2
- package/dist/chains/router/llm_router.cjs +31 -0
- package/dist/chains/router/llm_router.d.ts +24 -0
- package/dist/chains/router/llm_router.js +27 -0
- package/dist/chains/router/multi_prompt.cjs +76 -0
- package/dist/chains/router/multi_prompt.d.ts +8 -0
- package/dist/chains/router/multi_prompt.js +72 -0
- package/dist/chains/router/multi_prompt_prompt.cjs +42 -0
- package/dist/chains/router/multi_prompt_prompt.d.ts +2 -0
- package/dist/chains/router/multi_prompt_prompt.js +38 -0
- package/dist/chains/router/multi_retrieval_prompt.cjs +42 -0
- package/dist/chains/router/multi_retrieval_prompt.d.ts +2 -0
- package/dist/chains/router/multi_retrieval_prompt.js +38 -0
- package/dist/chains/router/multi_retrieval_qa.cjs +89 -0
- package/dist/chains/router/multi_retrieval_qa.d.ts +15 -0
- package/dist/chains/router/multi_retrieval_qa.js +85 -0
- package/dist/chains/router/multi_route.cjs +86 -0
- package/dist/chains/router/multi_route.d.ts +38 -0
- package/dist/chains/router/multi_route.js +81 -0
- package/dist/chains/router/utils.cjs +34 -0
- package/dist/chains/router/utils.d.ts +3 -0
- package/dist/chains/router/utils.js +30 -0
- package/dist/chat_models/openai.cjs +33 -19
- package/dist/chat_models/openai.d.ts +1 -1
- package/dist/chat_models/openai.js +33 -19
- package/dist/embeddings/openai.d.ts +1 -1
- package/dist/llms/openai-chat.cjs +31 -19
- package/dist/llms/openai-chat.d.ts +1 -1
- package/dist/llms/openai-chat.js +31 -19
- package/dist/llms/openai.cjs +29 -9
- package/dist/llms/openai.d.ts +1 -1
- package/dist/llms/openai.js +29 -9
- package/dist/output_parsers/index.cjs +6 -1
- package/dist/output_parsers/index.d.ts +3 -1
- package/dist/output_parsers/index.js +3 -1
- package/dist/output_parsers/list.cjs +46 -1
- package/dist/output_parsers/list.d.ts +14 -0
- package/dist/output_parsers/list.js +44 -0
- package/dist/output_parsers/router.cjs +32 -0
- package/dist/output_parsers/router.d.ts +11 -0
- package/dist/output_parsers/router.js +28 -0
- package/dist/output_parsers/structured.cjs +43 -3
- package/dist/output_parsers/structured.d.ts +11 -1
- package/dist/output_parsers/structured.js +41 -2
- package/dist/schema/index.cjs +10 -1
- package/dist/schema/index.d.ts +5 -0
- package/dist/schema/index.js +8 -0
- package/dist/schema/output_parser.d.ts +7 -1
- package/dist/stores/message/dynamodb.cjs +126 -0
- package/dist/stores/message/dynamodb.d.ts +23 -0
- package/dist/stores/message/dynamodb.js +122 -0
- package/dist/stores/message/in_memory.cjs +3 -6
- package/dist/stores/message/in_memory.d.ts +3 -4
- package/dist/stores/message/in_memory.js +4 -7
- package/dist/stores/message/utils.cjs +31 -0
- package/dist/stores/message/utils.d.ts +8 -0
- package/dist/stores/message/utils.js +26 -0
- package/dist/types/openai-types.cjs +2 -0
- package/dist/types/openai-types.d.ts +101 -0
- package/dist/types/openai-types.js +1 -0
- package/package.json +14 -1
- package/stores/message/dynamodb.cjs +1 -0
- package/stores/message/dynamodb.d.ts +1 -0
- package/stores/message/dynamodb.js +1 -0
package/dist/agents/index.cjs
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreAgent = exports.createSqlAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.SqlToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
|
|
3
|
+
exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createSqlAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.SqlToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
|
|
4
4
|
var agent_js_1 = require("./agent.cjs");
|
|
5
5
|
Object.defineProperty(exports, "Agent", { enumerable: true, get: function () { return agent_js_1.Agent; } });
|
|
6
6
|
Object.defineProperty(exports, "BaseSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseSingleActionAgent; } });
|
|
@@ -17,6 +17,7 @@ Object.defineProperty(exports, "createJsonAgent", { enumerable: true, get: funct
|
|
|
17
17
|
Object.defineProperty(exports, "createOpenApiAgent", { enumerable: true, get: function () { return index_js_1.createOpenApiAgent; } });
|
|
18
18
|
Object.defineProperty(exports, "createSqlAgent", { enumerable: true, get: function () { return index_js_1.createSqlAgent; } });
|
|
19
19
|
Object.defineProperty(exports, "createVectorStoreAgent", { enumerable: true, get: function () { return index_js_1.createVectorStoreAgent; } });
|
|
20
|
+
Object.defineProperty(exports, "createVectorStoreRouterAgent", { enumerable: true, get: function () { return index_js_1.createVectorStoreRouterAgent; } });
|
|
20
21
|
var base_js_1 = require("./agent_toolkits/base.cjs");
|
|
21
22
|
Object.defineProperty(exports, "Toolkit", { enumerable: true, get: function () { return base_js_1.Toolkit; } });
|
|
22
23
|
var index_js_2 = require("./chat/index.cjs");
|
package/dist/agents/index.d.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
export { Agent, AgentArgs, BaseSingleActionAgent, LLMSingleActionAgent, LLMSingleActionAgentInput, OutputParserArgs, } from "./agent.js";
|
|
2
|
-
export { JsonToolkit, OpenApiToolkit, RequestsToolkit, SqlToolkit, VectorStoreInfo, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createSqlAgent, SqlCreatePromptArgs, createVectorStoreAgent, } from "./agent_toolkits/index.js";
|
|
2
|
+
export { JsonToolkit, OpenApiToolkit, RequestsToolkit, SqlToolkit, VectorStoreInfo, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createSqlAgent, SqlCreatePromptArgs, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./agent_toolkits/index.js";
|
|
3
3
|
export { Toolkit } from "./agent_toolkits/base.js";
|
|
4
4
|
export { ChatAgent, ChatAgentInput, ChatCreatePromptArgs, } from "./chat/index.js";
|
|
5
5
|
export { ChatAgentOutputParser } from "./chat/outputParser.js";
|
package/dist/agents/index.js
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
export { Agent, BaseSingleActionAgent, LLMSingleActionAgent, } from "./agent.js";
|
|
2
|
-
export { JsonToolkit, OpenApiToolkit, RequestsToolkit, SqlToolkit, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createSqlAgent, createVectorStoreAgent, } from "./agent_toolkits/index.js";
|
|
2
|
+
export { JsonToolkit, OpenApiToolkit, RequestsToolkit, SqlToolkit, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createSqlAgent, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./agent_toolkits/index.js";
|
|
3
3
|
export { Toolkit } from "./agent_toolkits/base.js";
|
|
4
4
|
export { ChatAgent, } from "./chat/index.js";
|
|
5
5
|
export { ChatAgentOutputParser } from "./chat/outputParser.js";
|
package/dist/chains/base.cjs
CHANGED
|
@@ -71,10 +71,10 @@ class BaseChain extends index_js_2.BaseLangChain {
|
|
|
71
71
|
await runManager?.handleChainError(e);
|
|
72
72
|
throw e;
|
|
73
73
|
}
|
|
74
|
-
await runManager?.handleChainEnd(outputValues);
|
|
75
74
|
if (!(this.memory == null)) {
|
|
76
75
|
await this.memory.saveContext(values, outputValues);
|
|
77
76
|
}
|
|
77
|
+
await runManager?.handleChainEnd(outputValues);
|
|
78
78
|
// add the runManager's currentRunId to the outputValues
|
|
79
79
|
Object.defineProperty(outputValues, index_js_1.RUN_KEY, {
|
|
80
80
|
value: runManager ? { runId: runManager?.runId } : undefined,
|
package/dist/chains/base.js
CHANGED
|
@@ -68,10 +68,10 @@ export class BaseChain extends BaseLangChain {
|
|
|
68
68
|
await runManager?.handleChainError(e);
|
|
69
69
|
throw e;
|
|
70
70
|
}
|
|
71
|
-
await runManager?.handleChainEnd(outputValues);
|
|
72
71
|
if (!(this.memory == null)) {
|
|
73
72
|
await this.memory.saveContext(values, outputValues);
|
|
74
73
|
}
|
|
74
|
+
await runManager?.handleChainEnd(outputValues);
|
|
75
75
|
// add the runManager's currentRunId to the outputValues
|
|
76
76
|
Object.defineProperty(outputValues, RUN_KEY, {
|
|
77
77
|
value: runManager ? { runId: runManager?.runId } : undefined,
|
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.ConversationChain = void 0;
|
|
3
|
+
exports.ConversationChain = exports.DEFAULT_TEMPLATE = void 0;
|
|
4
4
|
const llm_chain_js_1 = require("./llm_chain.cjs");
|
|
5
5
|
const prompt_js_1 = require("../prompts/prompt.cjs");
|
|
6
6
|
const buffer_memory_js_1 = require("../memory/buffer_memory.cjs");
|
|
7
|
-
|
|
7
|
+
exports.DEFAULT_TEMPLATE = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
|
|
8
8
|
|
|
9
9
|
Current conversation:
|
|
10
10
|
{history}
|
|
@@ -15,7 +15,7 @@ class ConversationChain extends llm_chain_js_1.LLMChain {
|
|
|
15
15
|
super({
|
|
16
16
|
prompt: prompt ??
|
|
17
17
|
new prompt_js_1.PromptTemplate({
|
|
18
|
-
template:
|
|
18
|
+
template: exports.DEFAULT_TEMPLATE,
|
|
19
19
|
inputVariables: ["history", "input"],
|
|
20
20
|
}),
|
|
21
21
|
outputKey: outputKey ?? "response",
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import { LLMChain, LLMChainInput } from "./llm_chain.js";
|
|
2
2
|
import { Optional } from "../types/type-utils.js";
|
|
3
|
+
export declare const DEFAULT_TEMPLATE = "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:";
|
|
3
4
|
export declare class ConversationChain extends LLMChain {
|
|
4
5
|
constructor({ prompt, outputKey, memory, ...rest }: Optional<LLMChainInput, "prompt">);
|
|
5
6
|
}
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import { LLMChain } from "./llm_chain.js";
|
|
2
2
|
import { PromptTemplate } from "../prompts/prompt.js";
|
|
3
3
|
import { BufferMemory } from "../memory/buffer_memory.js";
|
|
4
|
-
const
|
|
4
|
+
export const DEFAULT_TEMPLATE = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
|
|
5
5
|
|
|
6
6
|
Current conversation:
|
|
7
7
|
{history}
|
|
@@ -12,7 +12,7 @@ export class ConversationChain extends LLMChain {
|
|
|
12
12
|
super({
|
|
13
13
|
prompt: prompt ??
|
|
14
14
|
new PromptTemplate({
|
|
15
|
-
template:
|
|
15
|
+
template: DEFAULT_TEMPLATE,
|
|
16
16
|
inputVariables: ["history", "input"],
|
|
17
17
|
}),
|
|
18
18
|
outputKey: outputKey ?? "response",
|
package/dist/chains/index.cjs
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.OpenAIModerationChain = exports.PRINCIPLES = exports.ConstitutionalPrinciple = exports.ConstitutionalChain = exports.RetrievalQAChain = exports.ConversationalRetrievalQAChain = exports.SqlDatabaseChain = exports.loadSummarizationChain = exports.loadQARefineChain = exports.loadQAMapReduceChain = exports.loadQAStuffChain = exports.loadQAChain = exports.VectorDBQAChain = exports.AnalyzeDocumentChain = exports.ChatVectorDBQAChain = exports.RefineDocumentsChain = exports.MapReduceDocumentsChain = exports.StuffDocumentsChain = exports.SimpleSequentialChain = exports.SequentialChain = exports.ConversationChain = exports.LLMChain = exports.BaseChain = void 0;
|
|
3
|
+
exports.MultiRetrievalQAChain = exports.MultiPromptChain = exports.LLMRouterChain = exports.RouterChain = exports.MultiRouteChain = exports.OpenAIModerationChain = exports.PRINCIPLES = exports.ConstitutionalPrinciple = exports.ConstitutionalChain = exports.RetrievalQAChain = exports.ConversationalRetrievalQAChain = exports.SqlDatabaseChain = exports.loadSummarizationChain = exports.loadQARefineChain = exports.loadQAMapReduceChain = exports.loadQAStuffChain = exports.loadQAChain = exports.VectorDBQAChain = exports.AnalyzeDocumentChain = exports.ChatVectorDBQAChain = exports.RefineDocumentsChain = exports.MapReduceDocumentsChain = exports.StuffDocumentsChain = exports.SimpleSequentialChain = exports.SequentialChain = exports.ConversationChain = exports.LLMChain = exports.BaseChain = void 0;
|
|
4
4
|
var base_js_1 = require("./base.cjs");
|
|
5
5
|
Object.defineProperty(exports, "BaseChain", { enumerable: true, get: function () { return base_js_1.BaseChain; } });
|
|
6
6
|
var llm_chain_js_1 = require("./llm_chain.cjs");
|
|
@@ -40,3 +40,12 @@ Object.defineProperty(exports, "ConstitutionalPrinciple", { enumerable: true, ge
|
|
|
40
40
|
Object.defineProperty(exports, "PRINCIPLES", { enumerable: true, get: function () { return constitutional_principle_js_1.PRINCIPLES; } });
|
|
41
41
|
var openai_moderation_js_1 = require("./openai_moderation.cjs");
|
|
42
42
|
Object.defineProperty(exports, "OpenAIModerationChain", { enumerable: true, get: function () { return openai_moderation_js_1.OpenAIModerationChain; } });
|
|
43
|
+
var multi_route_js_1 = require("./router/multi_route.cjs");
|
|
44
|
+
Object.defineProperty(exports, "MultiRouteChain", { enumerable: true, get: function () { return multi_route_js_1.MultiRouteChain; } });
|
|
45
|
+
Object.defineProperty(exports, "RouterChain", { enumerable: true, get: function () { return multi_route_js_1.RouterChain; } });
|
|
46
|
+
var llm_router_js_1 = require("./router/llm_router.cjs");
|
|
47
|
+
Object.defineProperty(exports, "LLMRouterChain", { enumerable: true, get: function () { return llm_router_js_1.LLMRouterChain; } });
|
|
48
|
+
var multi_prompt_js_1 = require("./router/multi_prompt.cjs");
|
|
49
|
+
Object.defineProperty(exports, "MultiPromptChain", { enumerable: true, get: function () { return multi_prompt_js_1.MultiPromptChain; } });
|
|
50
|
+
var multi_retrieval_qa_js_1 = require("./router/multi_retrieval_qa.cjs");
|
|
51
|
+
Object.defineProperty(exports, "MultiRetrievalQAChain", { enumerable: true, get: function () { return multi_retrieval_qa_js_1.MultiRetrievalQAChain; } });
|
package/dist/chains/index.d.ts
CHANGED
|
@@ -15,3 +15,7 @@ export { ConstitutionalChainInput, ConstitutionalChain, } from "./constitutional
|
|
|
15
15
|
export { ConstitutionalPrinciple, PRINCIPLES, } from "./constitutional_ai/constitutional_principle.js";
|
|
16
16
|
export { SerializedLLMChain, SerializedSequentialChain, SerializedSimpleSequentialChain, SerializedSqlDatabaseChain, SerializedAnalyzeDocumentChain, SerializedBaseChain, SerializedChatVectorDBQAChain, SerializedMapReduceDocumentsChain, SerializedStuffDocumentsChain, SerializedVectorDBQAChain, SerializedRefineDocumentsChain, } from "./serde.js";
|
|
17
17
|
export { OpenAIModerationChain } from "./openai_moderation.js";
|
|
18
|
+
export { MultiRouteChain, MultiRouteChainInput, RouterChain, } from "./router/multi_route.js";
|
|
19
|
+
export { LLMRouterChain, LLMRouterChainInput, RouterOutputSchema, } from "./router/llm_router.js";
|
|
20
|
+
export { MultiPromptChain } from "./router/multi_prompt.js";
|
|
21
|
+
export { MultiRetrievalQAChain } from "./router/multi_retrieval_qa.js";
|
package/dist/chains/index.js
CHANGED
|
@@ -14,3 +14,7 @@ export { RetrievalQAChain } from "./retrieval_qa.js";
|
|
|
14
14
|
export { ConstitutionalChain, } from "./constitutional_ai/constitutional_chain.js";
|
|
15
15
|
export { ConstitutionalPrinciple, PRINCIPLES, } from "./constitutional_ai/constitutional_principle.js";
|
|
16
16
|
export { OpenAIModerationChain } from "./openai_moderation.js";
|
|
17
|
+
export { MultiRouteChain, RouterChain, } from "./router/multi_route.js";
|
|
18
|
+
export { LLMRouterChain, } from "./router/llm_router.js";
|
|
19
|
+
export { MultiPromptChain } from "./router/multi_prompt.js";
|
|
20
|
+
export { MultiRetrievalQAChain } from "./router/multi_retrieval_qa.js";
|
|
@@ -69,7 +69,9 @@ class RetrievalQAChain extends base_js_1.BaseChain {
|
|
|
69
69
|
throw new Error("Not implemented");
|
|
70
70
|
}
|
|
71
71
|
static fromLLM(llm, retriever, options) {
|
|
72
|
-
const qaChain = (0, load_js_1.loadQAStuffChain)(llm
|
|
72
|
+
const qaChain = (0, load_js_1.loadQAStuffChain)(llm, {
|
|
73
|
+
prompt: options?.prompt,
|
|
74
|
+
});
|
|
73
75
|
return new this({
|
|
74
76
|
retriever,
|
|
75
77
|
combineDocumentsChain: qaChain,
|
|
@@ -2,6 +2,7 @@ import { BaseChain, ChainInputs } from "./base.js";
|
|
|
2
2
|
import { BaseLanguageModel } from "../base_language/index.js";
|
|
3
3
|
import { SerializedVectorDBQAChain } from "./serde.js";
|
|
4
4
|
import { ChainValues, BaseRetriever } from "../schema/index.js";
|
|
5
|
+
import { StuffQAChainParams } from "./question_answering/load.js";
|
|
5
6
|
import { CallbackManagerForChainRun } from "../callbacks/manager.js";
|
|
6
7
|
export type LoadValues = Record<string, any>;
|
|
7
8
|
export interface RetrievalQAChainInput extends Omit<ChainInputs, "memory"> {
|
|
@@ -23,5 +24,5 @@ export declare class RetrievalQAChain extends BaseChain implements RetrievalQACh
|
|
|
23
24
|
_chainType(): "retrieval_qa";
|
|
24
25
|
static deserialize(_data: SerializedVectorDBQAChain, _values: LoadValues): Promise<RetrievalQAChain>;
|
|
25
26
|
serialize(): SerializedVectorDBQAChain;
|
|
26
|
-
static fromLLM(llm: BaseLanguageModel, retriever: BaseRetriever, options?: Partial<Omit<RetrievalQAChainInput, "combineDocumentsChain" | "index">>): RetrievalQAChain;
|
|
27
|
+
static fromLLM(llm: BaseLanguageModel, retriever: BaseRetriever, options?: Partial<Omit<RetrievalQAChainInput, "combineDocumentsChain" | "index">> & StuffQAChainParams): RetrievalQAChain;
|
|
27
28
|
}
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { BaseChain } from "./base.js";
|
|
2
|
-
import { loadQAStuffChain } from "./question_answering/load.js";
|
|
2
|
+
import { loadQAStuffChain, } from "./question_answering/load.js";
|
|
3
3
|
export class RetrievalQAChain extends BaseChain {
|
|
4
4
|
get inputKeys() {
|
|
5
5
|
return [this.inputKey];
|
|
@@ -66,7 +66,9 @@ export class RetrievalQAChain extends BaseChain {
|
|
|
66
66
|
throw new Error("Not implemented");
|
|
67
67
|
}
|
|
68
68
|
static fromLLM(llm, retriever, options) {
|
|
69
|
-
const qaChain = loadQAStuffChain(llm
|
|
69
|
+
const qaChain = loadQAStuffChain(llm, {
|
|
70
|
+
prompt: options?.prompt,
|
|
71
|
+
});
|
|
70
72
|
return new this({
|
|
71
73
|
retriever,
|
|
72
74
|
combineDocumentsChain: qaChain,
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.LLMRouterChain = void 0;
|
|
4
|
+
const llm_chain_js_1 = require("../../chains/llm_chain.cjs");
|
|
5
|
+
const multi_route_js_1 = require("./multi_route.cjs");
|
|
6
|
+
class LLMRouterChain extends multi_route_js_1.RouterChain {
|
|
7
|
+
constructor(fields) {
|
|
8
|
+
super(fields);
|
|
9
|
+
Object.defineProperty(this, "llmChain", {
|
|
10
|
+
enumerable: true,
|
|
11
|
+
configurable: true,
|
|
12
|
+
writable: true,
|
|
13
|
+
value: void 0
|
|
14
|
+
});
|
|
15
|
+
this.llmChain = fields.llmChain;
|
|
16
|
+
}
|
|
17
|
+
get inputKeys() {
|
|
18
|
+
return this.llmChain.inputKeys;
|
|
19
|
+
}
|
|
20
|
+
async _call(values, runManager) {
|
|
21
|
+
return this.llmChain.predict(values, runManager?.getChild());
|
|
22
|
+
}
|
|
23
|
+
_chainType() {
|
|
24
|
+
return "llm_router_chain";
|
|
25
|
+
}
|
|
26
|
+
static fromLLM(llm, prompt, options) {
|
|
27
|
+
const llmChain = new llm_chain_js_1.LLMChain({ llm, prompt });
|
|
28
|
+
return new LLMRouterChain({ ...options, llmChain });
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
exports.LLMRouterChain = LLMRouterChain;
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { BasePromptTemplate } from "../../prompts/base.js";
|
|
2
|
+
import { LLMChain } from "../../chains/llm_chain.js";
|
|
3
|
+
import { RouterChain } from "./multi_route.js";
|
|
4
|
+
import { CallbackManagerForChainRun } from "../../callbacks/manager.js";
|
|
5
|
+
import { ChainValues } from "../../schema/index.js";
|
|
6
|
+
import { BaseLanguageModel } from "../../base_language/index.js";
|
|
7
|
+
import { ChainInputs } from "../../chains/base.js";
|
|
8
|
+
export type RouterOutputSchema = {
|
|
9
|
+
destination: string;
|
|
10
|
+
next_inputs: {
|
|
11
|
+
[key: string]: string;
|
|
12
|
+
};
|
|
13
|
+
};
|
|
14
|
+
export interface LLMRouterChainInput extends ChainInputs {
|
|
15
|
+
llmChain: LLMChain<RouterOutputSchema>;
|
|
16
|
+
}
|
|
17
|
+
export declare class LLMRouterChain extends RouterChain implements LLMRouterChainInput {
|
|
18
|
+
llmChain: LLMChain<RouterOutputSchema>;
|
|
19
|
+
constructor(fields: LLMRouterChainInput);
|
|
20
|
+
get inputKeys(): string[];
|
|
21
|
+
_call(values: ChainValues, runManager?: CallbackManagerForChainRun | undefined): Promise<RouterOutputSchema>;
|
|
22
|
+
_chainType(): string;
|
|
23
|
+
static fromLLM(llm: BaseLanguageModel, prompt: BasePromptTemplate, options?: Omit<LLMRouterChainInput, "llmChain">): LLMRouterChain;
|
|
24
|
+
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import { LLMChain } from "../../chains/llm_chain.js";
|
|
2
|
+
import { RouterChain } from "./multi_route.js";
|
|
3
|
+
export class LLMRouterChain extends RouterChain {
|
|
4
|
+
constructor(fields) {
|
|
5
|
+
super(fields);
|
|
6
|
+
Object.defineProperty(this, "llmChain", {
|
|
7
|
+
enumerable: true,
|
|
8
|
+
configurable: true,
|
|
9
|
+
writable: true,
|
|
10
|
+
value: void 0
|
|
11
|
+
});
|
|
12
|
+
this.llmChain = fields.llmChain;
|
|
13
|
+
}
|
|
14
|
+
get inputKeys() {
|
|
15
|
+
return this.llmChain.inputKeys;
|
|
16
|
+
}
|
|
17
|
+
async _call(values, runManager) {
|
|
18
|
+
return this.llmChain.predict(values, runManager?.getChild());
|
|
19
|
+
}
|
|
20
|
+
_chainType() {
|
|
21
|
+
return "llm_router_chain";
|
|
22
|
+
}
|
|
23
|
+
static fromLLM(llm, prompt, options) {
|
|
24
|
+
const llmChain = new LLMChain({ llm, prompt });
|
|
25
|
+
return new LLMRouterChain({ ...options, llmChain });
|
|
26
|
+
}
|
|
27
|
+
}
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.MultiPromptChain = void 0;
|
|
4
|
+
const zod_1 = require("zod");
|
|
5
|
+
const multi_route_js_1 = require("./multi_route.cjs");
|
|
6
|
+
const multi_prompt_prompt_js_1 = require("./multi_prompt_prompt.cjs");
|
|
7
|
+
const template_js_1 = require("../../prompts/template.cjs");
|
|
8
|
+
const llm_chain_js_1 = require("../../chains/llm_chain.cjs");
|
|
9
|
+
const prompt_js_1 = require("../../prompts/prompt.cjs");
|
|
10
|
+
const llm_router_js_1 = require("./llm_router.cjs");
|
|
11
|
+
const conversation_js_1 = require("../../chains/conversation.cjs");
|
|
12
|
+
const utils_js_1 = require("./utils.cjs");
|
|
13
|
+
const router_js_1 = require("../../output_parsers/router.cjs");
|
|
14
|
+
class MultiPromptChain extends multi_route_js_1.MultiRouteChain {
|
|
15
|
+
static fromPrompts(llm, promptNames, promptDescriptions, promptTemplates, defaultChain, options) {
|
|
16
|
+
const destinations = (0, utils_js_1.zipEntries)(promptNames, promptDescriptions).map(([name, desc]) => `${name}: ${desc}`);
|
|
17
|
+
const structuredOutputParserSchema = zod_1.z.object({
|
|
18
|
+
destination: zod_1.z
|
|
19
|
+
.string()
|
|
20
|
+
.optional()
|
|
21
|
+
.describe('name of the question answering system to use or "DEFAULT"'),
|
|
22
|
+
next_inputs: zod_1.z
|
|
23
|
+
.object({
|
|
24
|
+
input: zod_1.z
|
|
25
|
+
.string()
|
|
26
|
+
.describe("a potentially modified version of the original input"),
|
|
27
|
+
})
|
|
28
|
+
.describe("input to be fed to the next model"),
|
|
29
|
+
});
|
|
30
|
+
const outputParser = new router_js_1.RouterOutputParser(structuredOutputParserSchema);
|
|
31
|
+
const destinationsStr = destinations.join("\n");
|
|
32
|
+
const routerTemplate = (0, template_js_1.interpolateFString)((0, multi_prompt_prompt_js_1.STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE)(outputParser.getFormatInstructions({ interpolationDepth: 4 })), {
|
|
33
|
+
destinations: destinationsStr,
|
|
34
|
+
});
|
|
35
|
+
const routerPrompt = new prompt_js_1.PromptTemplate({
|
|
36
|
+
template: routerTemplate,
|
|
37
|
+
inputVariables: ["input"],
|
|
38
|
+
outputParser,
|
|
39
|
+
});
|
|
40
|
+
const routerChain = llm_router_js_1.LLMRouterChain.fromLLM(llm, routerPrompt);
|
|
41
|
+
const destinationChains = (0, utils_js_1.zipEntries)(promptNames, promptTemplates).reduce((acc, [name, template]) => {
|
|
42
|
+
let myPrompt;
|
|
43
|
+
if (typeof template === "object") {
|
|
44
|
+
myPrompt = template;
|
|
45
|
+
}
|
|
46
|
+
else if (typeof template === "string") {
|
|
47
|
+
myPrompt = new prompt_js_1.PromptTemplate({
|
|
48
|
+
template: template,
|
|
49
|
+
inputVariables: ["input"],
|
|
50
|
+
});
|
|
51
|
+
}
|
|
52
|
+
else {
|
|
53
|
+
throw new Error("Invalid prompt template");
|
|
54
|
+
}
|
|
55
|
+
acc[name] = new llm_chain_js_1.LLMChain({
|
|
56
|
+
llm,
|
|
57
|
+
prompt: myPrompt,
|
|
58
|
+
});
|
|
59
|
+
return acc;
|
|
60
|
+
}, {});
|
|
61
|
+
const convChain = new conversation_js_1.ConversationChain({
|
|
62
|
+
llm,
|
|
63
|
+
outputKey: "text",
|
|
64
|
+
});
|
|
65
|
+
return new MultiPromptChain({
|
|
66
|
+
routerChain,
|
|
67
|
+
destinationChains,
|
|
68
|
+
defaultChain: defaultChain ?? convChain,
|
|
69
|
+
...options,
|
|
70
|
+
});
|
|
71
|
+
}
|
|
72
|
+
_chainType() {
|
|
73
|
+
return "multi_prompt_chain";
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
exports.MultiPromptChain = MultiPromptChain;
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import { BaseLanguageModel } from "../../base_language/index.js";
|
|
2
|
+
import { MultiRouteChain, MultiRouteChainInput } from "./multi_route.js";
|
|
3
|
+
import { BaseChain } from "../../chains/base.js";
|
|
4
|
+
import { PromptTemplate } from "../../prompts/prompt.js";
|
|
5
|
+
export declare class MultiPromptChain extends MultiRouteChain {
|
|
6
|
+
static fromPrompts(llm: BaseLanguageModel, promptNames: string[], promptDescriptions: string[], promptTemplates: string[] | PromptTemplate[], defaultChain?: BaseChain, options?: Omit<MultiRouteChainInput, "defaultChain">): MultiPromptChain;
|
|
7
|
+
_chainType(): string;
|
|
8
|
+
}
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { MultiRouteChain } from "./multi_route.js";
|
|
3
|
+
import { STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE } from "./multi_prompt_prompt.js";
|
|
4
|
+
import { interpolateFString } from "../../prompts/template.js";
|
|
5
|
+
import { LLMChain } from "../../chains/llm_chain.js";
|
|
6
|
+
import { PromptTemplate } from "../../prompts/prompt.js";
|
|
7
|
+
import { LLMRouterChain } from "./llm_router.js";
|
|
8
|
+
import { ConversationChain } from "../../chains/conversation.js";
|
|
9
|
+
import { zipEntries } from "./utils.js";
|
|
10
|
+
import { RouterOutputParser } from "../../output_parsers/router.js";
|
|
11
|
+
export class MultiPromptChain extends MultiRouteChain {
|
|
12
|
+
static fromPrompts(llm, promptNames, promptDescriptions, promptTemplates, defaultChain, options) {
|
|
13
|
+
const destinations = zipEntries(promptNames, promptDescriptions).map(([name, desc]) => `${name}: ${desc}`);
|
|
14
|
+
const structuredOutputParserSchema = z.object({
|
|
15
|
+
destination: z
|
|
16
|
+
.string()
|
|
17
|
+
.optional()
|
|
18
|
+
.describe('name of the question answering system to use or "DEFAULT"'),
|
|
19
|
+
next_inputs: z
|
|
20
|
+
.object({
|
|
21
|
+
input: z
|
|
22
|
+
.string()
|
|
23
|
+
.describe("a potentially modified version of the original input"),
|
|
24
|
+
})
|
|
25
|
+
.describe("input to be fed to the next model"),
|
|
26
|
+
});
|
|
27
|
+
const outputParser = new RouterOutputParser(structuredOutputParserSchema);
|
|
28
|
+
const destinationsStr = destinations.join("\n");
|
|
29
|
+
const routerTemplate = interpolateFString(STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE(outputParser.getFormatInstructions({ interpolationDepth: 4 })), {
|
|
30
|
+
destinations: destinationsStr,
|
|
31
|
+
});
|
|
32
|
+
const routerPrompt = new PromptTemplate({
|
|
33
|
+
template: routerTemplate,
|
|
34
|
+
inputVariables: ["input"],
|
|
35
|
+
outputParser,
|
|
36
|
+
});
|
|
37
|
+
const routerChain = LLMRouterChain.fromLLM(llm, routerPrompt);
|
|
38
|
+
const destinationChains = zipEntries(promptNames, promptTemplates).reduce((acc, [name, template]) => {
|
|
39
|
+
let myPrompt;
|
|
40
|
+
if (typeof template === "object") {
|
|
41
|
+
myPrompt = template;
|
|
42
|
+
}
|
|
43
|
+
else if (typeof template === "string") {
|
|
44
|
+
myPrompt = new PromptTemplate({
|
|
45
|
+
template: template,
|
|
46
|
+
inputVariables: ["input"],
|
|
47
|
+
});
|
|
48
|
+
}
|
|
49
|
+
else {
|
|
50
|
+
throw new Error("Invalid prompt template");
|
|
51
|
+
}
|
|
52
|
+
acc[name] = new LLMChain({
|
|
53
|
+
llm,
|
|
54
|
+
prompt: myPrompt,
|
|
55
|
+
});
|
|
56
|
+
return acc;
|
|
57
|
+
}, {});
|
|
58
|
+
const convChain = new ConversationChain({
|
|
59
|
+
llm,
|
|
60
|
+
outputKey: "text",
|
|
61
|
+
});
|
|
62
|
+
return new MultiPromptChain({
|
|
63
|
+
routerChain,
|
|
64
|
+
destinationChains,
|
|
65
|
+
defaultChain: defaultChain ?? convChain,
|
|
66
|
+
...options,
|
|
67
|
+
});
|
|
68
|
+
}
|
|
69
|
+
_chainType() {
|
|
70
|
+
return "multi_prompt_chain";
|
|
71
|
+
}
|
|
72
|
+
}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE = exports.MULTI_PROMPT_ROUTER_TEMPLATE = void 0;
|
|
4
|
+
exports.MULTI_PROMPT_ROUTER_TEMPLATE = `Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.
|
|
5
|
+
|
|
6
|
+
<< FORMATTING >>
|
|
7
|
+
Return a markdown code snippet with a JSON object formatted to look like:
|
|
8
|
+
\`\`\`json
|
|
9
|
+
{{{{
|
|
10
|
+
"destination": string \\ name of the prompt to use or "DEFAULT"
|
|
11
|
+
"next_inputs": string \\ a potentially modified version of the original input
|
|
12
|
+
}}}}
|
|
13
|
+
\`\`\`
|
|
14
|
+
|
|
15
|
+
REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
|
|
16
|
+
REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed.
|
|
17
|
+
|
|
18
|
+
<< CANDIDATE PROMPTS >>
|
|
19
|
+
{destinations}
|
|
20
|
+
|
|
21
|
+
<< INPUT >>
|
|
22
|
+
{{input}}
|
|
23
|
+
|
|
24
|
+
<< OUTPUT >>
|
|
25
|
+
`;
|
|
26
|
+
const STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE = (formatting) => `Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.
|
|
27
|
+
|
|
28
|
+
<< FORMATTING >>
|
|
29
|
+
${formatting}
|
|
30
|
+
|
|
31
|
+
REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
|
|
32
|
+
REMEMBER: "next_inputs.input" can just be the original input if you don't think any modifications are needed.
|
|
33
|
+
|
|
34
|
+
<< CANDIDATE PROMPTS >>
|
|
35
|
+
{destinations}
|
|
36
|
+
|
|
37
|
+
<< INPUT >>
|
|
38
|
+
{{input}}
|
|
39
|
+
|
|
40
|
+
<< OUTPUT >>
|
|
41
|
+
`;
|
|
42
|
+
exports.STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE = STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE;
|
|
@@ -0,0 +1,2 @@
|
|
|
1
|
+
/**
 * Router prompt for `MultiPromptChain`: asks the LLM to choose the
 * destination prompt best suited to the input (or "DEFAULT") and optionally
 * revise the input. Doubled/quadrupled braces are escapes left for later
 * template interpolation; only `{destinations}` is filled in when the router
 * prompt is built — TODO(review) confirm against the consuming chain.
 */
export declare const MULTI_PROMPT_ROUTER_TEMPLATE = "Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.\n\n<< FORMATTING >>\nReturn a markdown code snippet with a JSON object formatted to look like:\n```json\n{{{{\n    \"destination\": string \\ name of the prompt to use or \"DEFAULT\"\n    \"next_inputs\": string \\ a potentially modified version of the original input\n}}}}\n```\n\nREMEMBER: \"destination\" MUST be one of the candidate prompt names specified below OR it can be \"DEFAULT\" if the input is not well suited for any of the candidate prompts.\nREMEMBER: \"next_inputs\" can just be the original input if you don't think any modifications are needed.\n\n<< CANDIDATE PROMPTS >>\n{destinations}\n\n<< INPUT >>\n{{input}}\n\n<< OUTPUT >>\n";
/**
 * Same router prompt, but with the `<< FORMATTING >>` section supplied by a
 * structured output parser's format instructions.
 * @param formatting - format instructions to splice into the template
 * @returns the completed router template string
 */
export declare const STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE: (formatting: string) => string;
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
// Router prompt templates for MultiPromptChain (ESM build). The router LLM
// picks the destination prompt by name (or "DEFAULT") and may rewrite the
// input before it is forwarded.
// NOTE(review): the doubled/quadrupled braces are presumably f-string
// escapes so that only {destinations} is substituted when the router prompt
// is assembled, leaving {input} for the final PromptTemplate pass — confirm
// against the fromPrompts caller.
const MULTI_PROMPT_ROUTER_TEMPLATE = `Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.

<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
\`\`\`json
{{{{
    "destination": string \\ name of the prompt to use or "DEFAULT"
    "next_inputs": string \\ a potentially modified version of the original input
}}}}
\`\`\`

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
`;

// Variant whose << FORMATTING >> section is supplied by a structured output
// parser's format instructions; the REMEMBER line therefore refers to the
// nested "next_inputs.input" field of the structured schema.
function STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE(formatting) {
    return `Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.

<< FORMATTING >>
${formatting}

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs.input" can just be the original input if you don't think any modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
`;
}

export { MULTI_PROMPT_ROUTER_TEMPLATE, STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE };
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE = exports.MULTI_RETRIEVAL_ROUTER_TEMPLATE = void 0;
|
|
4
|
+
exports.MULTI_RETRIEVAL_ROUTER_TEMPLATE = `Given a query to a question answering system, select the system best suited for the input. You will be given the names of the available systems and a description of what questions the system is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response.
|
|
5
|
+
|
|
6
|
+
<< FORMATTING >>
|
|
7
|
+
Return a markdown code snippet with a JSON object formatted to look like:
|
|
8
|
+
\`\`\`json
|
|
9
|
+
{{{{
|
|
10
|
+
"destination": string \\ name of the question answering system to use or "DEFAULT"
|
|
11
|
+
"next_inputs": string \\ a potentially modified version of the original input
|
|
12
|
+
}}}}
|
|
13
|
+
\`\`\`
|
|
14
|
+
|
|
15
|
+
REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
|
|
16
|
+
REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed.
|
|
17
|
+
|
|
18
|
+
<< CANDIDATE PROMPTS >>
|
|
19
|
+
{destinations}
|
|
20
|
+
|
|
21
|
+
<< INPUT >>
|
|
22
|
+
{{input}}
|
|
23
|
+
|
|
24
|
+
<< OUTPUT >>
|
|
25
|
+
`;
|
|
26
|
+
const STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE = (formatting) => `Given a query to a question answering system, select the system best suited for the input. You will be given the names of the available systems and a description of what questions the system is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response.
|
|
27
|
+
|
|
28
|
+
<< FORMATTING >>
|
|
29
|
+
${formatting}
|
|
30
|
+
|
|
31
|
+
REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
|
|
32
|
+
REMEMBER: "next_inputs.query" can just be the original input if you don't think any modifications are needed.
|
|
33
|
+
|
|
34
|
+
<< CANDIDATE PROMPTS >>
|
|
35
|
+
{destinations}
|
|
36
|
+
|
|
37
|
+
<< INPUT >>
|
|
38
|
+
{{input}}
|
|
39
|
+
|
|
40
|
+
<< OUTPUT >>
|
|
41
|
+
`;
|
|
42
|
+
exports.STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE = STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE;
|
|
@@ -0,0 +1,2 @@
|
|
|
1
|
+
/**
 * Router prompt for `MultiRetrievalQAChain`: asks the LLM to choose the
 * question answering system best suited to the query (or "DEFAULT") and
 * optionally revise the query. Doubled/quadrupled braces are escapes left
 * for later template interpolation; only `{destinations}` is filled in when
 * the router prompt is built — TODO(review) confirm against the consuming chain.
 */
export declare const MULTI_RETRIEVAL_ROUTER_TEMPLATE = "Given a query to a question answering system, select the system best suited for the input. You will be given the names of the available systems and a description of what questions the system is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response.\n\n<< FORMATTING >>\nReturn a markdown code snippet with a JSON object formatted to look like:\n```json\n{{{{\n    \"destination\": string \\ name of the question answering system to use or \"DEFAULT\"\n    \"next_inputs\": string \\ a potentially modified version of the original input\n}}}}\n```\n\nREMEMBER: \"destination\" MUST be one of the candidate prompt names specified below OR it can be \"DEFAULT\" if the input is not well suited for any of the candidate prompts.\nREMEMBER: \"next_inputs\" can just be the original input if you don't think any modifications are needed.\n\n<< CANDIDATE PROMPTS >>\n{destinations}\n\n<< INPUT >>\n{{input}}\n\n<< OUTPUT >>\n";
/**
 * Same router prompt, but with the `<< FORMATTING >>` section supplied by a
 * structured output parser's format instructions.
 * @param formatting - format instructions to splice into the template
 * @returns the completed router template string
 */
export declare const STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE: (formatting: string) => string;
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
// Router prompt templates for MultiRetrievalQAChain (ESM build). The router
// LLM picks the retrieval QA system by name (or "DEFAULT") and may rewrite
// the query before it is forwarded.
// NOTE(review): the doubled/quadrupled braces are presumably f-string
// escapes so that only {destinations} is substituted when the router prompt
// is assembled, leaving {input} for the final PromptTemplate pass — confirm
// against the fromRetrievers caller.
const MULTI_RETRIEVAL_ROUTER_TEMPLATE = `Given a query to a question answering system, select the system best suited for the input. You will be given the names of the available systems and a description of what questions the system is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response.

<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
\`\`\`json
{{{{
    "destination": string \\ name of the question answering system to use or "DEFAULT"
    "next_inputs": string \\ a potentially modified version of the original input
}}}}
\`\`\`

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
`;

// Variant whose << FORMATTING >> section is supplied by a structured output
// parser's format instructions; the REMEMBER line therefore refers to the
// nested "next_inputs.query" field of the structured schema.
function STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE(formatting) {
    return `Given a query to a question answering system, select the system best suited for the input. You will be given the names of the available systems and a description of what questions the system is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response.

<< FORMATTING >>
${formatting}

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs.query" can just be the original input if you don't think any modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
`;
}

export { MULTI_RETRIEVAL_ROUTER_TEMPLATE, STRUCTURED_MULTI_RETRIEVAL_ROUTER_TEMPLATE };
|