langchain 0.1.35 → 0.1.37

This diff reflects the changes between publicly released versions of the package as published to their respective public registries, and is provided for informational purposes only.
Files changed (84)
  1. package/dist/chains/conversational_retrieval_chain.cjs +61 -19
  2. package/dist/chains/conversational_retrieval_chain.d.ts +61 -19
  3. package/dist/chains/conversational_retrieval_chain.js +61 -19
  4. package/dist/chains/llm_chain.cjs +10 -5
  5. package/dist/chains/llm_chain.d.ts +10 -5
  6. package/dist/chains/llm_chain.js +10 -5
  7. package/dist/chains/openai_functions/base.cjs +2 -0
  8. package/dist/chains/openai_functions/base.d.ts +2 -0
  9. package/dist/chains/openai_functions/base.js +2 -0
  10. package/dist/chains/query_constructor/index.cjs +5 -8
  11. package/dist/chains/query_constructor/index.d.ts +5 -4
  12. package/dist/chains/query_constructor/index.js +3 -6
  13. package/dist/chains/query_constructor/ir.cjs +15 -139
  14. package/dist/chains/query_constructor/ir.d.ts +1 -138
  15. package/dist/chains/query_constructor/ir.js +1 -132
  16. package/dist/chains/query_constructor/prompt.cjs +2 -2
  17. package/dist/chains/query_constructor/prompt.d.ts +1 -1
  18. package/dist/chains/query_constructor/prompt.js +1 -1
  19. package/dist/chains/retrieval_qa.cjs +23 -14
  20. package/dist/chains/retrieval_qa.d.ts +23 -14
  21. package/dist/chains/retrieval_qa.js +23 -14
  22. package/dist/document_loaders/fs/unstructured.cjs +1 -1
  23. package/dist/document_loaders/fs/unstructured.js +1 -1
  24. package/dist/document_loaders/web/browserbase.cjs +87 -0
  25. package/dist/document_loaders/web/browserbase.d.ts +49 -0
  26. package/dist/document_loaders/web/browserbase.js +80 -0
  27. package/dist/document_loaders/web/firecrawl.cjs +88 -0
  28. package/dist/document_loaders/web/firecrawl.d.ts +48 -0
  29. package/dist/document_loaders/web/firecrawl.js +81 -0
  30. package/dist/document_loaders/web/s3.cjs +2 -2
  31. package/dist/document_loaders/web/s3.js +2 -2
  32. package/dist/load/import_constants.cjs +2 -0
  33. package/dist/load/import_constants.js +2 -0
  34. package/dist/output_parsers/expression.cjs +1 -1
  35. package/dist/output_parsers/expression.d.ts +1 -1
  36. package/dist/output_parsers/expression.js +1 -1
  37. package/dist/retrievers/self_query/base.cjs +3 -136
  38. package/dist/retrievers/self_query/base.d.ts +1 -69
  39. package/dist/retrievers/self_query/base.js +1 -134
  40. package/dist/retrievers/self_query/chroma.cjs +9 -10
  41. package/dist/retrievers/self_query/chroma.d.ts +1 -1
  42. package/dist/retrievers/self_query/chroma.js +1 -2
  43. package/dist/retrievers/self_query/functional.cjs +2 -195
  44. package/dist/retrievers/self_query/functional.d.ts +1 -87
  45. package/dist/retrievers/self_query/functional.js +1 -194
  46. package/dist/retrievers/self_query/index.cjs +9 -13
  47. package/dist/retrievers/self_query/index.d.ts +11 -8
  48. package/dist/retrievers/self_query/index.js +7 -11
  49. package/dist/retrievers/self_query/pinecone.cjs +9 -10
  50. package/dist/retrievers/self_query/pinecone.d.ts +1 -1
  51. package/dist/retrievers/self_query/pinecone.js +1 -2
  52. package/dist/retrievers/self_query/supabase.cjs +28 -30
  53. package/dist/retrievers/self_query/supabase.d.ts +1 -2
  54. package/dist/retrievers/self_query/supabase.js +1 -3
  55. package/dist/retrievers/self_query/supabase_utils.cjs +2 -2
  56. package/dist/retrievers/self_query/supabase_utils.d.ts +1 -1
  57. package/dist/retrievers/self_query/supabase_utils.js +1 -1
  58. package/dist/retrievers/self_query/vectara.cjs +15 -17
  59. package/dist/retrievers/self_query/vectara.d.ts +1 -2
  60. package/dist/retrievers/self_query/vectara.js +1 -3
  61. package/dist/retrievers/self_query/weaviate.cjs +19 -21
  62. package/dist/retrievers/self_query/weaviate.d.ts +1 -2
  63. package/dist/retrievers/self_query/weaviate.js +1 -3
  64. package/dist/smith/config.d.ts +4 -4
  65. package/dist/storage/in_memory.cjs +2 -81
  66. package/dist/storage/in_memory.d.ts +1 -49
  67. package/dist/storage/in_memory.js +1 -80
  68. package/dist/text_splitter.cjs +15 -727
  69. package/dist/text_splitter.d.ts +1 -77
  70. package/dist/text_splitter.js +1 -720
  71. package/dist/vectorstores/qdrant.cjs +2 -0
  72. package/dist/vectorstores/qdrant.js +2 -0
  73. package/document_loaders/web/browserbase.cjs +1 -0
  74. package/document_loaders/web/browserbase.d.cts +1 -0
  75. package/document_loaders/web/browserbase.d.ts +1 -0
  76. package/document_loaders/web/browserbase.js +1 -0
  77. package/document_loaders/web/firecrawl.cjs +1 -0
  78. package/document_loaders/web/firecrawl.d.cts +1 -0
  79. package/document_loaders/web/firecrawl.d.ts +1 -0
  80. package/document_loaders/web/firecrawl.js +1 -0
  81. package/package.json +40 -3
  82. package/dist/retrievers/self_query/utils.cjs +0 -94
  83. package/dist/retrievers/self_query/utils.d.ts +0 -29
  84. package/dist/retrievers/self_query/utils.js +0 -85
package/dist/chains/conversational_retrieval_chain.cjs

@@ -13,37 +13,79 @@ Chat History:
  Follow Up Input: {question}
  Standalone question:`;
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`.
+ *
  * Class for conducting conversational question-answering tasks with a
  * retrieval component. Extends the BaseChain class and implements the
  * ConversationalRetrievalQAChainInput interface.
  * @example
  * ```typescript
- * const model = new ChatAnthropic({});
- *
- * const text = fs.readFileSync("state_of_the_union.txt", "utf8");
- *
- * const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
- * const docs = await textSplitter.createDocuments([text]);
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import {
+ * ChatPromptTemplate,
+ * MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
  *
- * const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
+ * const retriever = ...your retriever;
+ * const llm = new ChatAnthropic();
  *
- * const chain = ConversationalRetrievalQAChain.fromLLM(
- * model,
- * vectorStore.asRetriever(),
- * );
+ * // Contextualize question
+ * const contextualizeQSystemPrompt = `
+ * Given a chat history and the latest user question
+ * which might reference context in the chat history,
+ * formulate a standalone question which can be understood
+ * without the chat history. Do NOT answer the question, just
+ * reformulate it if needed and otherwise return it as is.`;
+ * const contextualizeQPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", contextualizeQSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
+ * const historyAwareRetriever = await createHistoryAwareRetriever({
+ * llm,
+ * retriever,
+ * rephrasePrompt: contextualizeQPrompt,
+ * });
  *
- * const question = "What did the president say about Justice Breyer?";
+ * // Answer question
+ * const qaSystemPrompt = `
+ * You are an assistant for question-answering tasks. Use
+ * the following pieces of retrieved context to answer the
+ * question. If you don't know the answer, just say that you
+ * don't know. Use three sentences maximum and keep the answer
+ * concise.
+ * \n\n
+ * {context}`;
+ * const qaPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", qaSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
  *
- * const res = await chain.call({ question, chat_history: "" });
- * console.log(res);
+ * // Below we use createStuffDocuments_chain to feed all retrieved context
+ * // into the LLM. Note that we can also use StuffDocumentsChain and other
+ * // instances of BaseCombineDocumentsChain.
+ * const questionAnswerChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt: qaPrompt,
+ * });
  *
- * const chatHistory = `${question}\n${res.text}`;
- * const followUpRes = await chain.call({
- * question: "Was that nice?",
- * chat_history: chatHistory,
+ * const ragChain = await createRetrievalChain({
+ * retriever: historyAwareRetriever,
+ * combineDocsChain: questionAnswerChain,
  * });
- * console.log(followUpRes);
  *
+ * // Usage:
+ * const chat_history: BaseMessage[] = [];
+ * const response = await ragChain.invoke({
+ * chat_history,
+ * input: "...",
+ * });
  * ```
  */
  class ConversationalRetrievalQAChain extends base_js_1.BaseChain {
package/dist/chains/conversational_retrieval_chain.d.ts

@@ -21,37 +21,79 @@ export interface ConversationalRetrievalQAChainInput extends ChainInputs {
  inputKey?: string;
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`.
+ *
  * Class for conducting conversational question-answering tasks with a
  * retrieval component. Extends the BaseChain class and implements the
  * ConversationalRetrievalQAChainInput interface.
  * @example
  * ```typescript
- * const model = new ChatAnthropic({});
- *
- * const text = fs.readFileSync("state_of_the_union.txt", "utf8");
- *
- * const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
- * const docs = await textSplitter.createDocuments([text]);
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import {
+ * ChatPromptTemplate,
+ * MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
  *
- * const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
+ * const retriever = ...your retriever;
+ * const llm = new ChatAnthropic();
  *
- * const chain = ConversationalRetrievalQAChain.fromLLM(
- * model,
- * vectorStore.asRetriever(),
- * );
+ * // Contextualize question
+ * const contextualizeQSystemPrompt = `
+ * Given a chat history and the latest user question
+ * which might reference context in the chat history,
+ * formulate a standalone question which can be understood
+ * without the chat history. Do NOT answer the question, just
+ * reformulate it if needed and otherwise return it as is.`;
+ * const contextualizeQPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", contextualizeQSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
+ * const historyAwareRetriever = await createHistoryAwareRetriever({
+ * llm,
+ * retriever,
+ * rephrasePrompt: contextualizeQPrompt,
+ * });
  *
- * const question = "What did the president say about Justice Breyer?";
+ * // Answer question
+ * const qaSystemPrompt = `
+ * You are an assistant for question-answering tasks. Use
+ * the following pieces of retrieved context to answer the
+ * question. If you don't know the answer, just say that you
+ * don't know. Use three sentences maximum and keep the answer
+ * concise.
+ * \n\n
+ * {context}`;
+ * const qaPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", qaSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
  *
- * const res = await chain.call({ question, chat_history: "" });
- * console.log(res);
+ * // Below we use createStuffDocuments_chain to feed all retrieved context
+ * // into the LLM. Note that we can also use StuffDocumentsChain and other
+ * // instances of BaseCombineDocumentsChain.
+ * const questionAnswerChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt: qaPrompt,
+ * });
  *
- * const chatHistory = `${question}\n${res.text}`;
- * const followUpRes = await chain.call({
- * question: "Was that nice?",
- * chat_history: chatHistory,
+ * const ragChain = await createRetrievalChain({
+ * retriever: historyAwareRetriever,
+ * combineDocsChain: questionAnswerChain,
  * });
- * console.log(followUpRes);
  *
+ * // Usage:
+ * const chat_history: BaseMessage[] = [];
+ * const response = await ragChain.invoke({
+ * chat_history,
+ * input: "...",
+ * });
  * ```
  */
  export declare class ConversationalRetrievalQAChain extends BaseChain implements ConversationalRetrievalQAChainInput {
package/dist/chains/conversational_retrieval_chain.js

@@ -10,37 +10,79 @@ Chat History:
  Follow Up Input: {question}
  Standalone question:`;
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`.
+ *
  * Class for conducting conversational question-answering tasks with a
  * retrieval component. Extends the BaseChain class and implements the
  * ConversationalRetrievalQAChainInput interface.
  * @example
  * ```typescript
- * const model = new ChatAnthropic({});
- *
- * const text = fs.readFileSync("state_of_the_union.txt", "utf8");
- *
- * const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
- * const docs = await textSplitter.createDocuments([text]);
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import {
+ * ChatPromptTemplate,
+ * MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
  *
- * const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
+ * const retriever = ...your retriever;
+ * const llm = new ChatAnthropic();
  *
- * const chain = ConversationalRetrievalQAChain.fromLLM(
- * model,
- * vectorStore.asRetriever(),
- * );
+ * // Contextualize question
+ * const contextualizeQSystemPrompt = `
+ * Given a chat history and the latest user question
+ * which might reference context in the chat history,
+ * formulate a standalone question which can be understood
+ * without the chat history. Do NOT answer the question, just
+ * reformulate it if needed and otherwise return it as is.`;
+ * const contextualizeQPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", contextualizeQSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
+ * const historyAwareRetriever = await createHistoryAwareRetriever({
+ * llm,
+ * retriever,
+ * rephrasePrompt: contextualizeQPrompt,
+ * });
  *
- * const question = "What did the president say about Justice Breyer?";
+ * // Answer question
+ * const qaSystemPrompt = `
+ * You are an assistant for question-answering tasks. Use
+ * the following pieces of retrieved context to answer the
+ * question. If you don't know the answer, just say that you
+ * don't know. Use three sentences maximum and keep the answer
+ * concise.
+ * \n\n
+ * {context}`;
+ * const qaPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", qaSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
  *
- * const res = await chain.call({ question, chat_history: "" });
- * console.log(res);
+ * // Below we use createStuffDocuments_chain to feed all retrieved context
+ * // into the LLM. Note that we can also use StuffDocumentsChain and other
+ * // instances of BaseCombineDocumentsChain.
+ * const questionAnswerChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt: qaPrompt,
+ * });
  *
- * const chatHistory = `${question}\n${res.text}`;
- * const followUpRes = await chain.call({
- * question: "Was that nice?",
- * chat_history: chatHistory,
+ * const ragChain = await createRetrievalChain({
+ * retriever: historyAwareRetriever,
+ * combineDocsChain: questionAnswerChain,
  * });
- * console.log(followUpRes);
  *
+ * // Usage:
+ * const chat_history: BaseMessage[] = [];
+ * const response = await ragChain.invoke({
+ * chat_history,
+ * input: "...",
+ * });
  * ```
  */
  export class ConversationalRetrievalQAChain extends BaseChain {
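
The JSDoc example above stops after a single turn with an empty `chat_history`. As a rough sketch of how a second turn might look under that example's setup (it assumes the `ragChain` built above, and that `createRetrievalChain` returns its generation under the `answer` key), the previous exchange is pushed onto the history before the next `invoke`:

```typescript
import { AIMessage, BaseMessage, HumanMessage } from "@langchain/core/messages";

// Assumes `ragChain` was constructed as in the JSDoc example above.
const chatHistory: BaseMessage[] = [];

const firstQuestion = "What did the president say about Justice Breyer?";
const first = await ragChain.invoke({
  chat_history: chatHistory,
  input: firstQuestion,
});

// Carry the first exchange into the history before asking a follow-up.
chatHistory.push(new HumanMessage(firstQuestion));
chatHistory.push(new AIMessage(first.answer));

const followUp = await ragChain.invoke({
  chat_history: chatHistory,
  input: "Was that nice?",
});
```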
package/dist/chains/llm_chain.cjs

@@ -29,16 +29,21 @@ function _getLanguageModel(llmLike) {
  }
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. Use the LangChain Expression Language (LCEL) instead.
+ * See the example below for how to use LCEL with the LLMChain class:
+ *
  * Chain to run queries against LLMs.
  *
  * @example
  * ```ts
- * import { LLMChain } from "langchain/chains";
- * import { OpenAI } from "langchain/llms/openai";
- * import { PromptTemplate } from "langchain/prompts";
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
+ * const llm = new ChatOpenAI();
+ * const chain = prompt.pipe(llm);
  *
- * const prompt = PromptTemplate.fromTemplate("Tell me a {adjective} joke");
- * const llm = new LLMChain({ llm: new OpenAI(), prompt });
+ * const response = await chain.invoke({ adjective: "funny" });
  * ```
  */
  class LLMChain extends base_js_1.BaseChain {

package/dist/chains/llm_chain.d.ts

@@ -29,16 +29,21 @@ export interface LLMChainInput<T extends string | object = string, Model extends
  outputKey?: string;
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. Use the LangChain Expression Language (LCEL) instead.
+ * See the example below for how to use LCEL with the LLMChain class:
+ *
  * Chain to run queries against LLMs.
  *
  * @example
  * ```ts
- * import { LLMChain } from "langchain/chains";
- * import { OpenAI } from "langchain/llms/openai";
- * import { PromptTemplate } from "langchain/prompts";
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
+ * const llm = new ChatOpenAI();
+ * const chain = prompt.pipe(llm);
  *
- * const prompt = PromptTemplate.fromTemplate("Tell me a {adjective} joke");
- * const llm = new LLMChain({ llm: new OpenAI(), prompt });
+ * const response = await chain.invoke({ adjective: "funny" });
  * ```
  */
  export declare class LLMChain<T extends string | object = string, Model extends LLMType = LLMType> extends BaseChain implements LLMChainInput<T> {

package/dist/chains/llm_chain.js

@@ -26,16 +26,21 @@ function _getLanguageModel(llmLike) {
  }
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. Use the LangChain Expression Language (LCEL) instead.
+ * See the example below for how to use LCEL with the LLMChain class:
+ *
  * Chain to run queries against LLMs.
  *
  * @example
  * ```ts
- * import { LLMChain } from "langchain/chains";
- * import { OpenAI } from "langchain/llms/openai";
- * import { PromptTemplate } from "langchain/prompts";
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
+ * const llm = new ChatOpenAI();
+ * const chain = prompt.pipe(llm);
  *
- * const prompt = PromptTemplate.fromTemplate("Tell me a {adjective} joke");
- * const llm = new LLMChain({ llm: new OpenAI(), prompt });
+ * const response = await chain.invoke({ adjective: "funny" });
  * ```
  */
  export class LLMChain extends BaseChain {
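
For reference, a minimal sketch of the LCEL pattern this deprecation points to, with a `StringOutputParser` appended so the chain yields a plain string (roughly what `LLMChain` returned under its `text` key); the model choice here is illustrative, not part of this diff:

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatOpenAI } from "@langchain/openai";

const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
const llm = new ChatOpenAI({ temperature: 0 });

// prompt -> model -> string; invoke() resolves to the model's text output.
const chain = prompt.pipe(llm).pipe(new StringOutputParser());

const text = await chain.invoke({ adjective: "funny" });
```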
package/dist/chains/openai_functions/base.cjs

@@ -71,6 +71,8 @@ function isZodSchema(schema) {
  return typeof schema.safeParse === "function";
  }
  /**
+ * @deprecated Prefer the `.withStructuredOutput` method on chat model classes.
+ *
  * Create a runnable that uses an OpenAI function to get a structured output.
  * @param config Params required to create the runnable.
  * @returns A runnable sequence that will pass the given function to the model when run.

package/dist/chains/openai_functions/base.d.ts

@@ -98,6 +98,8 @@ export type CreateStructuredOutputRunnableConfig<RunInput extends Record<string,
  outputParser?: BaseOutputParser<RunOutput>;
  };
  /**
+ * @deprecated Prefer the `.withStructuredOutput` method on chat model classes.
+ *
  * Create a runnable that uses an OpenAI function to get a structured output.
  * @param config Params required to create the runnable.
  * @returns A runnable sequence that will pass the given function to the model when run.

package/dist/chains/openai_functions/base.js

@@ -67,6 +67,8 @@ function isZodSchema(schema) {
  return typeof schema.safeParse === "function";
  }
  /**
+ * @deprecated Prefer the `.withStructuredOutput` method on chat model classes.
+ *
  * Create a runnable that uses an OpenAI function to get a structured output.
  * @param config Params required to create the runnable.
  * @returns A runnable sequence that will pass the given function to the model when run.
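
The recommended replacement, `.withStructuredOutput`, binds a schema to the model and parses its response. A minimal sketch with a Zod schema follows; the model and schema here are illustrative assumptions, not taken from this diff:

```typescript
import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";

// Shape the model's output should conform to.
const joke = z.object({
  setup: z.string().describe("The setup of the joke"),
  punchline: z.string().describe("The punchline of the joke"),
});

const model = new ChatOpenAI({ temperature: 0 });

// Returns a runnable whose output is parsed into the shape of `joke`.
const structuredModel = model.withStructuredOutput(joke);

const result = await structuredModel.invoke("Tell me a joke about cats");
// result: { setup: string; punchline: string }
```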
package/dist/chains/query_constructor/index.cjs

@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.loadQueryConstructorChain = exports.formatAttributeInfo = exports.StructuredQueryOutputParser = exports.EXAMPLE_PROMPT = exports.DEFAULT_SUFFIX = exports.DEFAULT_SCHEMA = exports.DEFAULT_PREFIX = exports.DEFAULT_EXAMPLES = exports.QueryTransformer = void 0;
+ exports.loadQueryConstructorRunnable = exports.formatAttributeInfo = exports.StructuredQueryOutputParser = exports.EXAMPLE_PROMPT = exports.DEFAULT_SUFFIX = exports.DEFAULT_SCHEMA = exports.DEFAULT_PREFIX = exports.DEFAULT_EXAMPLES = exports.QueryTransformer = void 0;
  const zod_1 = require("zod");
  const prompts_1 = require("@langchain/core/prompts");
  const parser_js_1 = require("./parser.cjs");

@@ -12,7 +12,6 @@ Object.defineProperty(exports, "DEFAULT_PREFIX", { enumerable: true, get: functi
  Object.defineProperty(exports, "DEFAULT_SCHEMA", { enumerable: true, get: function () { return prompt_js_1.DEFAULT_SCHEMA; } });
  Object.defineProperty(exports, "DEFAULT_SUFFIX", { enumerable: true, get: function () { return prompt_js_1.DEFAULT_SUFFIX; } });
  Object.defineProperty(exports, "EXAMPLE_PROMPT", { enumerable: true, get: function () { return prompt_js_1.EXAMPLE_PROMPT; } });
- const llm_chain_js_1 = require("../llm_chain.cjs");
  const structured_js_1 = require("../../output_parsers/structured.cjs");
  const queryInputSchema = /* #__PURE__ */ zod_1.z.object({
  query: /* #__PURE__ */ zod_1.z

@@ -119,11 +118,9 @@ function _getPrompt(documentContents, attributeInfo, allowedComparators, allowed
  outputParser,
  });
  }
- function loadQueryConstructorChain(opts) {
+ function loadQueryConstructorRunnable(opts) {
  const prompt = _getPrompt(opts.documentContents, opts.attributeInfo, opts.allowedComparators, opts.allowedOperators, opts.examples);
- return new llm_chain_js_1.LLMChain({
- llm: opts.llm,
- prompt,
- });
+ const outputParser = StructuredQueryOutputParser.fromComponents(opts.allowedComparators, opts.allowedOperators);
+ return prompt.pipe(opts.llm).pipe(outputParser);
  }
- exports.loadQueryConstructorChain = loadQueryConstructorChain;
+ exports.loadQueryConstructorRunnable = loadQueryConstructorRunnable;
package/dist/chains/query_constructor/index.d.ts

@@ -1,10 +1,9 @@
- import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
  import { z } from "zod";
+ import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
  import { InputValues } from "@langchain/core/utils/types";
  import { QueryTransformer, TraverseType } from "./parser.js";
  import { Comparator, Operator, StructuredQuery } from "./ir.js";
  import { DEFAULT_EXAMPLES, DEFAULT_PREFIX, DEFAULT_SCHEMA, DEFAULT_SUFFIX, EXAMPLE_PROMPT } from "./prompt.js";
- import { LLMChain } from "../llm_chain.js";
  import { AsymmetricStructuredOutputParser } from "../../output_parsers/structured.js";
  import { AttributeInfo } from "../../schema/query_constructor.js";
  export { QueryTransformer, type TraverseType };

@@ -50,7 +49,7 @@ export declare function formatAttributeInfo(info: AttributeInfo[]): string;
  /**
  * A type that represents options for the query constructor chain.
  */
- export type QueryConstructorChainOptions = {
+ export type QueryConstructorRunnableOptions = {
  llm: BaseLanguageModelInterface;
  documentContents: string;
  attributeInfo: AttributeInfo[];

@@ -58,4 +57,6 @@ export type QueryConstructorChainOptions = {
  allowedComparators?: Comparator[];
  allowedOperators?: Operator[];
  };
- export declare function loadQueryConstructorChain(opts: QueryConstructorChainOptions): LLMChain<string, BaseLanguageModelInterface<any, import("@langchain/core/language_models/base").BaseLanguageModelCallOptions>>;
+ /** @deprecated */
+ export type QueryConstructorChainOptions = QueryConstructorRunnableOptions;
+ export declare function loadQueryConstructorRunnable(opts: QueryConstructorRunnableOptions): import("@langchain/core/runnables").Runnable<any, StructuredQuery, import("@langchain/core/runnables").RunnableConfig>;
package/dist/chains/query_constructor/index.js

@@ -3,7 +3,6 @@ import { interpolateFString, FewShotPromptTemplate, } from "@langchain/core/prom
  import { QueryTransformer } from "./parser.js";
  import { Comparators, Operators, StructuredQuery, } from "./ir.js";
  import { DEFAULT_EXAMPLES, DEFAULT_PREFIX, DEFAULT_SCHEMA, DEFAULT_SUFFIX, EXAMPLE_PROMPT, } from "./prompt.js";
- import { LLMChain } from "../llm_chain.js";
  import { AsymmetricStructuredOutputParser } from "../../output_parsers/structured.js";
  export { QueryTransformer };
  export { DEFAULT_EXAMPLES, DEFAULT_PREFIX, DEFAULT_SCHEMA, DEFAULT_SUFFIX, EXAMPLE_PROMPT, };

@@ -110,10 +109,8 @@ function _getPrompt(documentContents, attributeInfo, allowedComparators, allowed
  outputParser,
  });
  }
- export function loadQueryConstructorChain(opts) {
+ export function loadQueryConstructorRunnable(opts) {
  const prompt = _getPrompt(opts.documentContents, opts.attributeInfo, opts.allowedComparators, opts.allowedOperators, opts.examples);
- return new LLMChain({
- llm: opts.llm,
- prompt,
- });
+ const outputParser = StructuredQueryOutputParser.fromComponents(opts.allowedComparators, opts.allowedOperators);
+ return prompt.pipe(opts.llm).pipe(outputParser);
  }
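
Since `loadQueryConstructorChain` is now `loadQueryConstructorRunnable` and returns a runnable that emits a `StructuredQuery` directly, usage might look like the sketch below. The entrypoints, the attribute metadata, and the `query` input key are assumptions inferred from this diff, not confirmed by it:

```typescript
import { loadQueryConstructorRunnable } from "langchain/chains/query_constructor";
import { AttributeInfo } from "langchain/schema/query_constructor";
import { ChatOpenAI } from "@langchain/openai";

// Describe the metadata fields the structured query may filter on.
const attributeInfo: AttributeInfo[] = [
  { name: "genre", type: "string", description: "The genre of the movie" },
  { name: "year", type: "number", description: "The year the movie was released" },
];

const queryConstructor = loadQueryConstructorRunnable({
  llm: new ChatOpenAI({ temperature: 0 }),
  documentContents: "Brief summary of a movie",
  attributeInfo,
});

// Yields a StructuredQuery ({ query, filter }) instead of an LLMChain result.
const structuredQuery = await queryConstructor.invoke({
  query: "Which sci-fi movies were released after 2010?",
});
```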
package/dist/chains/query_constructor/ir.cjs

@@ -1,141 +1,17 @@
  "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.StructuredQuery = exports.Operation = exports.Comparison = exports.FilterDirective = exports.Expression = exports.Visitor = exports.Comparators = exports.Operators = void 0;
- exports.Operators = {
- and: "and",
- or: "or",
- not: "not",
- };
- exports.Comparators = {
- eq: "eq",
- ne: "ne",
- lt: "lt",
- gt: "gt",
- lte: "lte",
- gte: "gte",
- };
- /**
- * Abstract class for visiting expressions. Subclasses must implement
- * visitOperation, visitComparison, and visitStructuredQuery methods.
- */
- class Visitor {
- }
- exports.Visitor = Visitor;
- /**
- * Abstract class representing an expression. Subclasses must implement
- * the exprName property and the accept method.
- */
- class Expression {
- accept(visitor) {
- if (this.exprName === "Operation") {
- return visitor.visitOperation(this);
- }
- else if (this.exprName === "Comparison") {
- return visitor.visitComparison(this);
- }
- else if (this.exprName === "StructuredQuery") {
- return visitor.visitStructuredQuery(this);
- }
- else {
- throw new Error("Unknown Expression type");
- }
- }
- }
- exports.Expression = Expression;
- /**
- * Abstract class representing a filter directive. It extends the
- * Expression class.
- */
- class FilterDirective extends Expression {
- }
- exports.FilterDirective = FilterDirective;
- /**
- * Class representing a comparison filter directive. It extends the
- * FilterDirective class.
- */
- class Comparison extends FilterDirective {
- constructor(comparator, attribute, value) {
- super();
- Object.defineProperty(this, "comparator", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: comparator
- });
- Object.defineProperty(this, "attribute", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: attribute
- });
- Object.defineProperty(this, "value", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: value
- });
- Object.defineProperty(this, "exprName", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "Comparison"
- });
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
  }
- }
- exports.Comparison = Comparison;
- /**
- * Class representing an operation filter directive. It extends the
- * FilterDirective class.
- */
- class Operation extends FilterDirective {
- constructor(operator, args) {
- super();
- Object.defineProperty(this, "operator", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: operator
- });
- Object.defineProperty(this, "args", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: args
- });
- Object.defineProperty(this, "exprName", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "Operation"
- });
- }
- }
- exports.Operation = Operation;
- /**
- * Class representing a structured query expression. It extends the
- * Expression class.
- */
- class StructuredQuery extends Expression {
- constructor(query, filter) {
- super();
- Object.defineProperty(this, "query", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: query
- });
- Object.defineProperty(this, "filter", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: filter
- });
- Object.defineProperty(this, "exprName", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "StructuredQuery"
- });
- }
- }
- exports.StructuredQuery = StructuredQuery;
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ __exportStar(require("@langchain/core/structured_query"), exports);
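
The local IR classes (`Expression`, `Comparison`, `Operation`, `StructuredQuery`, plus the `Comparators`/`Operators` maps) are replaced by a re-export, so they now resolve to the implementations in `@langchain/core/structured_query`. A small sketch of importing them from the new home, using the constructor shapes visible in the removed code above:

```typescript
import {
  Comparators,
  Comparison,
  Operation,
  Operators,
  StructuredQuery,
} from "@langchain/core/structured_query";

// Equivalent to the filter `genre == "sci-fi" AND year >= 2010`.
const filter = new Operation(Operators.and, [
  new Comparison(Comparators.eq, "genre", "sci-fi"),
  new Comparison(Comparators.gte, "year", 2010),
]);

const query = new StructuredQuery("recent sci-fi movies", filter);
```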