langchain 0.1.36 → 0.1.37
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chains/conversational_retrieval_chain.cjs +61 -19
- package/dist/chains/conversational_retrieval_chain.d.ts +61 -19
- package/dist/chains/conversational_retrieval_chain.js +61 -19
- package/dist/chains/llm_chain.cjs +10 -5
- package/dist/chains/llm_chain.d.ts +10 -5
- package/dist/chains/llm_chain.js +10 -5
- package/dist/chains/retrieval_qa.cjs +23 -14
- package/dist/chains/retrieval_qa.d.ts +23 -14
- package/dist/chains/retrieval_qa.js +23 -14
- package/dist/document_loaders/fs/unstructured.cjs +1 -1
- package/dist/document_loaders/fs/unstructured.js +1 -1
- package/dist/document_loaders/web/browserbase.cjs +87 -0
- package/dist/document_loaders/web/browserbase.d.ts +49 -0
- package/dist/document_loaders/web/browserbase.js +80 -0
- package/dist/document_loaders/web/s3.cjs +2 -2
- package/dist/document_loaders/web/s3.js +2 -2
- package/dist/load/import_constants.cjs +1 -0
- package/dist/load/import_constants.js +1 -0
- package/dist/smith/config.d.ts +4 -4
- package/dist/vectorstores/qdrant.cjs +2 -0
- package/dist/vectorstores/qdrant.js +2 -0
- package/document_loaders/web/browserbase.cjs +1 -0
- package/document_loaders/web/browserbase.d.cts +1 -0
- package/document_loaders/web/browserbase.d.ts +1 -0
- package/document_loaders/web/browserbase.js +1 -0
- package/package.json +19 -1

package/dist/chains/conversational_retrieval_chain.cjs
CHANGED
@@ -13,37 +13,79 @@ Chat History:
  Follow Up Input: {question}
  Standalone question:`;
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`.
+ *
  * Class for conducting conversational question-answering tasks with a
  * retrieval component. Extends the BaseChain class and implements the
  * ConversationalRetrievalQAChainInput interface.
  * @example
  * ```typescript
- *
- *
- *
- *
- *
- *
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import {
+ * ChatPromptTemplate,
+ * MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
  *
- * const
+ * const retriever = ...your retriever;
+ * const llm = new ChatAnthropic();
  *
- *
- *
- *
- *
+ * // Contextualize question
+ * const contextualizeQSystemPrompt = `
+ * Given a chat history and the latest user question
+ * which might reference context in the chat history,
+ * formulate a standalone question which can be understood
+ * without the chat history. Do NOT answer the question, just
+ * reformulate it if needed and otherwise return it as is.`;
+ * const contextualizeQPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", contextualizeQSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
+ * const historyAwareRetriever = await createHistoryAwareRetriever({
+ * llm,
+ * retriever,
+ * rephrasePrompt: contextualizeQPrompt,
+ * });
  *
- *
+ * // Answer question
+ * const qaSystemPrompt = `
+ * You are an assistant for question-answering tasks. Use
+ * the following pieces of retrieved context to answer the
+ * question. If you don't know the answer, just say that you
+ * don't know. Use three sentences maximum and keep the answer
+ * concise.
+ * \n\n
+ * {context}`;
+ * const qaPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", qaSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
  *
- *
- *
+ * // Below we use createStuffDocumentsChain to feed all retrieved context
+ * // into the LLM. Note that we can also use StuffDocumentsChain and other
+ * // instances of BaseCombineDocumentsChain.
+ * const questionAnswerChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt: qaPrompt,
+ * });
  *
- * const
- *
- *
- * chat_history: chatHistory,
+ * const ragChain = await createRetrievalChain({
+ * retriever: historyAwareRetriever,
+ * combineDocsChain: questionAnswerChain,
  * });
- * console.log(followUpRes);
  *
+ * // Usage:
+ * const chat_history: BaseMessage[] = [];
+ * const response = await ragChain.invoke({
+ * chat_history,
+ * input: "...",
+ * });
  * ```
  */
  class ConversationalRetrievalQAChain extends base_js_1.BaseChain {

package/dist/chains/conversational_retrieval_chain.d.ts
CHANGED
@@ -21,37 +21,79 @@ export interface ConversationalRetrievalQAChainInput extends ChainInputs {
  inputKey?: string;
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`.
+ *
  * Class for conducting conversational question-answering tasks with a
  * retrieval component. Extends the BaseChain class and implements the
  * ConversationalRetrievalQAChainInput interface.
  * @example
  * ```typescript
- *
- *
- *
- *
- *
- *
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import {
+ * ChatPromptTemplate,
+ * MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
  *
- * const
+ * const retriever = ...your retriever;
+ * const llm = new ChatAnthropic();
  *
- *
- *
- *
- *
+ * // Contextualize question
+ * const contextualizeQSystemPrompt = `
+ * Given a chat history and the latest user question
+ * which might reference context in the chat history,
+ * formulate a standalone question which can be understood
+ * without the chat history. Do NOT answer the question, just
+ * reformulate it if needed and otherwise return it as is.`;
+ * const contextualizeQPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", contextualizeQSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
+ * const historyAwareRetriever = await createHistoryAwareRetriever({
+ * llm,
+ * retriever,
+ * rephrasePrompt: contextualizeQPrompt,
+ * });
  *
- *
+ * // Answer question
+ * const qaSystemPrompt = `
+ * You are an assistant for question-answering tasks. Use
+ * the following pieces of retrieved context to answer the
+ * question. If you don't know the answer, just say that you
+ * don't know. Use three sentences maximum and keep the answer
+ * concise.
+ * \n\n
+ * {context}`;
+ * const qaPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", qaSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
  *
- *
- *
+ * // Below we use createStuffDocumentsChain to feed all retrieved context
+ * // into the LLM. Note that we can also use StuffDocumentsChain and other
+ * // instances of BaseCombineDocumentsChain.
+ * const questionAnswerChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt: qaPrompt,
+ * });
  *
- * const
- *
- *
- * chat_history: chatHistory,
+ * const ragChain = await createRetrievalChain({
+ * retriever: historyAwareRetriever,
+ * combineDocsChain: questionAnswerChain,
  * });
- * console.log(followUpRes);
  *
+ * // Usage:
+ * const chat_history: BaseMessage[] = [];
+ * const response = await ragChain.invoke({
+ * chat_history,
+ * input: "...",
+ * });
  * ```
  */
  export declare class ConversationalRetrievalQAChain extends BaseChain implements ConversationalRetrievalQAChainInput {

package/dist/chains/conversational_retrieval_chain.js
CHANGED
@@ -10,37 +10,79 @@ Chat History:
  Follow Up Input: {question}
  Standalone question:`;
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`.
+ *
  * Class for conducting conversational question-answering tasks with a
  * retrieval component. Extends the BaseChain class and implements the
  * ConversationalRetrievalQAChainInput interface.
  * @example
  * ```typescript
- *
- *
- *
- *
- *
- *
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import {
+ * ChatPromptTemplate,
+ * MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { BaseMessage } from "@langchain/core/messages";
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
  *
- * const
+ * const retriever = ...your retriever;
+ * const llm = new ChatAnthropic();
  *
- *
- *
- *
- *
+ * // Contextualize question
+ * const contextualizeQSystemPrompt = `
+ * Given a chat history and the latest user question
+ * which might reference context in the chat history,
+ * formulate a standalone question which can be understood
+ * without the chat history. Do NOT answer the question, just
+ * reformulate it if needed and otherwise return it as is.`;
+ * const contextualizeQPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", contextualizeQSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
+ * const historyAwareRetriever = await createHistoryAwareRetriever({
+ * llm,
+ * retriever,
+ * rephrasePrompt: contextualizeQPrompt,
+ * });
  *
- *
+ * // Answer question
+ * const qaSystemPrompt = `
+ * You are an assistant for question-answering tasks. Use
+ * the following pieces of retrieved context to answer the
+ * question. If you don't know the answer, just say that you
+ * don't know. Use three sentences maximum and keep the answer
+ * concise.
+ * \n\n
+ * {context}`;
+ * const qaPrompt = ChatPromptTemplate.fromMessages([
+ * ["system", qaSystemPrompt],
+ * new MessagesPlaceholder("chat_history"),
+ * ["human", "{input}"],
+ * ]);
  *
- *
- *
+ * // Below we use createStuffDocumentsChain to feed all retrieved context
+ * // into the LLM. Note that we can also use StuffDocumentsChain and other
+ * // instances of BaseCombineDocumentsChain.
+ * const questionAnswerChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt: qaPrompt,
+ * });
  *
- * const
- *
- *
- * chat_history: chatHistory,
+ * const ragChain = await createRetrievalChain({
+ * retriever: historyAwareRetriever,
+ * combineDocsChain: questionAnswerChain,
  * });
- * console.log(followUpRes);
  *
+ * // Usage:
+ * const chat_history: BaseMessage[] = [];
+ * const response = await ragChain.invoke({
+ * chat_history,
+ * input: "...",
+ * });
  * ```
  */
  export class ConversationalRetrievalQAChain extends BaseChain {
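
The replacement example in the deprecation notice above initializes `chat_history` but never appends to it. Below is a minimal sketch of carrying the history across turns; it assumes a `ragChain` built exactly as in the notice, and that `createRetrievalChain` results expose the generated text on an `answer` key.

```typescript
import { HumanMessage, AIMessage, BaseMessage } from "@langchain/core/messages";
import type { Runnable } from "@langchain/core/runnables";

// `ragChain` is the chain assembled in the deprecation notice above; the
// output shape ({ answer: string }) is an assumption about createRetrievalChain's defaults.
export async function askWithHistory(
  ragChain: Runnable<{ input: string; chat_history: BaseMessage[] }, { answer: string }>,
  chatHistory: BaseMessage[],
  question: string
): Promise<string> {
  const result = await ragChain.invoke({ input: question, chat_history: chatHistory });
  // Record the exchange so the next turn can be contextualized against it.
  chatHistory.push(new HumanMessage(question), new AIMessage(result.answer));
  return result.answer;
}
```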

package/dist/chains/llm_chain.cjs
CHANGED
@@ -29,16 +29,21 @@ function _getLanguageModel(llmLike) {
  }
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. Use the LangChain Expression Language (LCEL) instead.
+ * See the example below for how to use LCEL with the LLMChain class:
+ *
  * Chain to run queries against LLMs.
  *
  * @example
  * ```ts
- * import {
- * import {
- *
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
+ * const llm = new ChatOpenAI();
+ * const chain = prompt.pipe(llm);
  *
- * const
- * const llm = new LLMChain({ llm: new OpenAI(), prompt });
+ * const response = await chain.invoke({ adjective: "funny" });
  * ```
  */
  class LLMChain extends base_js_1.BaseChain {

package/dist/chains/llm_chain.d.ts
CHANGED
@@ -29,16 +29,21 @@ export interface LLMChainInput<T extends string | object = string, Model extends
  outputKey?: string;
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. Use the LangChain Expression Language (LCEL) instead.
+ * See the example below for how to use LCEL with the LLMChain class:
+ *
  * Chain to run queries against LLMs.
  *
  * @example
  * ```ts
- * import {
- * import {
- *
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
+ * const llm = new ChatOpenAI();
+ * const chain = prompt.pipe(llm);
  *
- * const
- * const llm = new LLMChain({ llm: new OpenAI(), prompt });
+ * const response = await chain.invoke({ adjective: "funny" });
  * ```
  */
  export declare class LLMChain<T extends string | object = string, Model extends LLMType = LLMType> extends BaseChain implements LLMChainInput<T> {

package/dist/chains/llm_chain.js
CHANGED
@@ -26,16 +26,21 @@ function _getLanguageModel(llmLike) {
  }
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. Use the LangChain Expression Language (LCEL) instead.
+ * See the example below for how to use LCEL with the LLMChain class:
+ *
  * Chain to run queries against LLMs.
  *
  * @example
  * ```ts
- * import {
- * import {
- *
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
+ * const llm = new ChatOpenAI();
+ * const chain = prompt.pipe(llm);
  *
- * const
- * const llm = new LLMChain({ llm: new OpenAI(), prompt });
+ * const response = await chain.invoke({ adjective: "funny" });
  * ```
  */
  export class LLMChain extends BaseChain {
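
The LCEL example in the new docstring returns whatever the chat model returns (an AIMessage). A common follow-up when migrating off LLMChain is to pipe into `StringOutputParser` so the chain yields a plain string, comparable to LLMChain's `text` output. A minimal sketch, assuming an OpenAI API key is available in the environment:

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatOpenAI } from "@langchain/openai";

const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
const llm = new ChatOpenAI();

// prompt -> model -> string, composed with LCEL's .pipe()
const chain = prompt.pipe(llm).pipe(new StringOutputParser());

const joke = await chain.invoke({ adjective: "funny" });
console.log(joke); // a plain string rather than an AIMessage
```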

package/dist/chains/retrieval_qa.cjs
CHANGED
@@ -4,28 +4,37 @@ exports.RetrievalQAChain = void 0;
  const base_js_1 = require("./base.cjs");
  const load_js_1 = require("./question_answering/load.cjs");
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`:
  * Class representing a chain for performing question-answering tasks with
  * a retrieval component.
  * @example
  * ```typescript
- *
- *
- *
- *
- *
- *
- *
- *
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
+ * import { MemoryVectorStore } from "langchain/vectorstores/memory";
+ *
+ * const documents = [...your documents here];
+ * const embeddings = ...your embeddings model;
+ * const llm = ...your LLM model;
  *
- *
- *
+ * const vectorstore = await MemoryVectorStore.fromDocuments(
+ * documents,
+ * embeddings
+ * );
+ * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input}`);
  *
- *
- *
- *
+ * const combineDocsChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt,
  * });
- *
+ * const retriever = vectorstore.asRetriever();
  *
+ * const retrievalChain = await createRetrievalChain({
+ * combineDocsChain,
+ * retriever,
+ * });
  * ```
  */
  class RetrievalQAChain extends base_js_1.BaseChain {

package/dist/chains/retrieval_qa.d.ts
CHANGED
@@ -16,28 +16,37 @@ export interface RetrievalQAChainInput extends Omit<ChainInputs, "memory"> {
  returnSourceDocuments?: boolean;
  }
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`:
  * Class representing a chain for performing question-answering tasks with
  * a retrieval component.
  * @example
  * ```typescript
- *
- *
- *
- *
- *
- *
- *
- *
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
+ * import { MemoryVectorStore } from "langchain/vectorstores/memory";
+ *
+ * const documents = [...your documents here];
+ * const embeddings = ...your embeddings model;
+ * const llm = ...your LLM model;
  *
- *
- *
+ * const vectorstore = await MemoryVectorStore.fromDocuments(
+ * documents,
+ * embeddings
+ * );
+ * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input}`);
  *
- *
- *
- *
+ * const combineDocsChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt,
  * });
- *
+ * const retriever = vectorstore.asRetriever();
  *
+ * const retrievalChain = await createRetrievalChain({
+ * combineDocsChain,
+ * retriever,
+ * });
  * ```
  */
  export declare class RetrievalQAChain extends BaseChain implements RetrievalQAChainInput {

package/dist/chains/retrieval_qa.js
CHANGED
@@ -1,28 +1,37 @@
  import { BaseChain } from "./base.js";
  import { loadQAStuffChain, } from "./question_answering/load.js";
  /**
+ * @deprecated This class will be removed in 0.3.0. See below for an example implementation using
+ * `createRetrievalChain`:
  * Class representing a chain for performing question-answering tasks with
  * a retrieval component.
  * @example
  * ```typescript
- *
- *
- *
- *
- *
- *
- *
- *
+ * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+ * import { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { createRetrievalChain } from "langchain/chains/retrieval";
+ * import { MemoryVectorStore } from "langchain/vectorstores/memory";
+ *
+ * const documents = [...your documents here];
+ * const embeddings = ...your embeddings model;
+ * const llm = ...your LLM model;
  *
- *
- *
+ * const vectorstore = await MemoryVectorStore.fromDocuments(
+ * documents,
+ * embeddings
+ * );
+ * const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input}`);
  *
- *
- *
- *
+ * const combineDocsChain = await createStuffDocumentsChain({
+ * llm,
+ * prompt,
  * });
- *
+ * const retriever = vectorstore.asRetriever();
  *
+ * const retrievalChain = await createRetrievalChain({
+ * combineDocsChain,
+ * retriever,
+ * });
  * ```
  */
  export class RetrievalQAChain extends BaseChain {
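
For completeness, a hedged sketch of calling the chain assembled in the deprecation example above; it assumes `retrievalChain` was created with `createRetrievalChain` as shown, and that the result carries the retrieved documents on `context` and the generated text on `answer` (the default output keys, as far as the example above implies).

```typescript
import type { DocumentInterface } from "@langchain/core/documents";
import type { Runnable } from "@langchain/core/runnables";

// `retrievalChain` is the chain from the example above; the output shape is an
// assumption about createRetrievalChain's defaults.
export async function answerQuestion(
  retrievalChain: Runnable<
    { input: string },
    { answer: string; context: DocumentInterface[] }
  >,
  question: string
): Promise<string> {
  const result = await retrievalChain.invoke({ input: question });
  console.log(`Used ${result.context.length} retrieved documents`);
  return result.answer;
}
```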

package/dist/document_loaders/fs/unstructured.cjs
CHANGED
@@ -239,7 +239,7 @@ class UnstructuredLoader extends base_js_1.BaseDocumentLoader {
  const documents = [];
  for (const element of elements) {
  const { metadata, text } = element;
- if (typeof text === "string") {
+ if (typeof text === "string" && text !== "") {
  documents.push(new documents_1.Document({
  pageContent: text,
  metadata: {

package/dist/document_loaders/fs/unstructured.js
CHANGED
@@ -235,7 +235,7 @@ export class UnstructuredLoader extends BaseDocumentLoader {
  const documents = [];
  for (const element of elements) {
  const { metadata, text } = element;
- if (typeof text === "string") {
+ if (typeof text === "string" && text !== "") {
  documents.push(new Document({
  pageContent: text,
  metadata: {

package/dist/document_loaders/web/browserbase.cjs
ADDED
@@ -0,0 +1,87 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+ return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.BrowserbaseLoader = void 0;
+ const documents_1 = require("@langchain/core/documents");
+ const sdk_1 = __importDefault(require("@browserbasehq/sdk"));
+ const base_js_1 = require("../base.cjs");
+ /**
+ * Load pre-rendered web pages using a headless browser hosted on Browserbase.
+ *
+ * Depends on `@browserbasehq/sdk` package.
+ * Get your API key from https://browserbase.com
+ *
+ * @example
+ * ```typescript
+ * import { BrowserbaseLoader } from "langchain/document_loaders/web/browserbase";
+ *
+ * const loader = new BrowserbaseLoader(["https://example.com"], {
+ * apiKey: process.env.BROWSERBASE_API_KEY,
+ * textContent: true,
+ * });
+ *
+ * const docs = await loader.load();
+ * ```
+ *
+ * @param {string[]} urls - The URLs of the web pages to load.
+ * @param {BrowserbaseLoaderOptions} [options] - Browserbase client options.
+ */
+ class BrowserbaseLoader extends base_js_1.BaseDocumentLoader {
+ constructor(urls, options = {}) {
+ super();
+ Object.defineProperty(this, "urls", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "options", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "browserbase", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.urls = urls;
+ this.options = options;
+ this.browserbase = new sdk_1.default(options.apiKey);
+ }
+ /**
+ * Load pages from URLs.
+ *
+ * @returns {Promise<DocumentInterface[]>} - A promise which resolves to a list of documents.
+ */
+ async load() {
+ const documents = [];
+ for await (const doc of this.lazyLoad()) {
+ documents.push(doc);
+ }
+ return documents;
+ }
+ /**
+ * Load pages from URLs.
+ *
+ * @returns {Generator<DocumentInterface>} - A generator that yields documents.
+ */
+ async *lazyLoad() {
+ const pages = await this.browserbase.loadURLs(this.urls, this.options);
+ let index = 0;
+ for await (const page of pages) {
+ yield new documents_1.Document({
+ pageContent: page,
+ metadata: {
+ url: this.urls[index],
+ },
+ });
+ index += index + 1;
+ }
+ }
+ }
+ exports.BrowserbaseLoader = BrowserbaseLoader;

package/dist/document_loaders/web/browserbase.d.ts
ADDED
@@ -0,0 +1,49 @@
+ import { Document, type DocumentInterface } from "@langchain/core/documents";
+ import Browserbase, { BrowserbaseLoadOptions } from "@browserbasehq/sdk";
+ import { BaseDocumentLoader } from "../base.js";
+ import type { DocumentLoader } from "../base.js";
+ interface BrowserbaseLoaderOptions extends BrowserbaseLoadOptions {
+ apiKey?: string;
+ }
+ /**
+ * Load pre-rendered web pages using a headless browser hosted on Browserbase.
+ *
+ * Depends on `@browserbasehq/sdk` package.
+ * Get your API key from https://browserbase.com
+ *
+ * @example
+ * ```typescript
+ * import { BrowserbaseLoader } from "langchain/document_loaders/web/browserbase";
+ *
+ * const loader = new BrowserbaseLoader(["https://example.com"], {
+ * apiKey: process.env.BROWSERBASE_API_KEY,
+ * textContent: true,
+ * });
+ *
+ * const docs = await loader.load();
+ * ```
+ *
+ * @param {string[]} urls - The URLs of the web pages to load.
+ * @param {BrowserbaseLoaderOptions} [options] - Browserbase client options.
+ */
+ export declare class BrowserbaseLoader extends BaseDocumentLoader implements DocumentLoader {
+ urls: string[];
+ options: BrowserbaseLoaderOptions;
+ browserbase: Browserbase;
+ constructor(urls: string[], options?: BrowserbaseLoaderOptions);
+ /**
+ * Load pages from URLs.
+ *
+ * @returns {Promise<DocumentInterface[]>} - A promise which resolves to a list of documents.
+ */
+ load(): Promise<DocumentInterface[]>;
+ /**
+ * Load pages from URLs.
+ *
+ * @returns {Generator<DocumentInterface>} - A generator that yields documents.
+ */
+ lazyLoad(): AsyncGenerator<Document<{
+ url: string;
+ }>, void, unknown>;
+ }
+ export {};

package/dist/document_loaders/web/browserbase.js
ADDED
@@ -0,0 +1,80 @@
+ import { Document } from "@langchain/core/documents";
+ import Browserbase from "@browserbasehq/sdk";
+ import { BaseDocumentLoader } from "../base.js";
+ /**
+ * Load pre-rendered web pages using a headless browser hosted on Browserbase.
+ *
+ * Depends on `@browserbasehq/sdk` package.
+ * Get your API key from https://browserbase.com
+ *
+ * @example
+ * ```typescript
+ * import { BrowserbaseLoader } from "langchain/document_loaders/web/browserbase";
+ *
+ * const loader = new BrowserbaseLoader(["https://example.com"], {
+ * apiKey: process.env.BROWSERBASE_API_KEY,
+ * textContent: true,
+ * });
+ *
+ * const docs = await loader.load();
+ * ```
+ *
+ * @param {string[]} urls - The URLs of the web pages to load.
+ * @param {BrowserbaseLoaderOptions} [options] - Browserbase client options.
+ */
+ export class BrowserbaseLoader extends BaseDocumentLoader {
+ constructor(urls, options = {}) {
+ super();
+ Object.defineProperty(this, "urls", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "options", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "browserbase", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.urls = urls;
+ this.options = options;
+ this.browserbase = new Browserbase(options.apiKey);
+ }
+ /**
+ * Load pages from URLs.
+ *
+ * @returns {Promise<DocumentInterface[]>} - A promise which resolves to a list of documents.
+ */
+ async load() {
+ const documents = [];
+ for await (const doc of this.lazyLoad()) {
+ documents.push(doc);
+ }
+ return documents;
+ }
+ /**
+ * Load pages from URLs.
+ *
+ * @returns {Generator<DocumentInterface>} - A generator that yields documents.
+ */
+ async *lazyLoad() {
+ const pages = await this.browserbase.loadURLs(this.urls, this.options);
+ let index = 0;
+ for await (const page of pages) {
+ yield new Document({
+ pageContent: page,
+ metadata: {
+ url: this.urls[index],
+ },
+ });
+ index += index + 1;
+ }
+ }
+ }
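
The JSDoc example above uses `load()`, which buffers every page before returning. Since `lazyLoad()` is an async generator, pages can also be consumed one at a time. A sketch, assuming `BROWSERBASE_API_KEY` is set and the optional `@browserbasehq/sdk` peer dependency is installed:

```typescript
import { BrowserbaseLoader } from "langchain/document_loaders/web/browserbase";

const loader = new BrowserbaseLoader(
  ["https://example.com", "https://www.example.org"],
  { apiKey: process.env.BROWSERBASE_API_KEY, textContent: true }
);

// Stream documents as Browserbase renders each URL instead of waiting for all of them.
for await (const doc of loader.lazyLoad()) {
  console.log(doc.metadata.url, doc.pageContent.length);
}
```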

package/dist/document_loaders/web/s3.cjs
CHANGED
@@ -148,8 +148,8 @@ class S3Loader extends base_js_1.BaseDocumentLoader {
  const docs = await unstructuredLoader.load();
  return docs;
  }
- catch {
- throw new Error(`Failed to load file ${filePath} using unstructured loader
+ catch (e) {
+ throw new Error(`Failed to load file ${filePath} using unstructured loader: ${e.message}`);
  }
  }
  }

package/dist/document_loaders/web/s3.js
CHANGED
@@ -122,8 +122,8 @@ export class S3Loader extends BaseDocumentLoader {
  const docs = await unstructuredLoader.load();
  return docs;
  }
- catch {
- throw new Error(`Failed to load file ${filePath} using unstructured loader
+ catch (e) {
+ throw new Error(`Failed to load file ${filePath} using unstructured loader: ${e.message}`);
  }
  }
  }

package/dist/load/import_constants.cjs
CHANGED
@@ -85,6 +85,7 @@ exports.optionalImportEntrypoints = [
  "langchain/document_loaders/web/assemblyai",
  "langchain/document_loaders/web/azure_blob_storage_container",
  "langchain/document_loaders/web/azure_blob_storage_file",
+ "langchain/document_loaders/web/browserbase",
  "langchain/document_loaders/web/cheerio",
  "langchain/document_loaders/web/puppeteer",
  "langchain/document_loaders/web/playwright",

package/dist/load/import_constants.js
CHANGED
@@ -82,6 +82,7 @@ export const optionalImportEntrypoints = [
  "langchain/document_loaders/web/assemblyai",
  "langchain/document_loaders/web/azure_blob_storage_container",
  "langchain/document_loaders/web/azure_blob_storage_file",
+ "langchain/document_loaders/web/browserbase",
  "langchain/document_loaders/web/cheerio",
  "langchain/document_loaders/web/puppeteer",
  "langchain/document_loaders/web/playwright",

package/dist/smith/config.d.ts
CHANGED
@@ -16,10 +16,10 @@ export type EvaluatorInputFormatter = ({ rawInput, rawPrediction, rawReferenceOut
  rawReferenceOutput?: any;
  run: Run;
  }) => EvaluatorInputs;
- export type DynamicRunEvaluatorParams = {
- input:
- prediction?:
- reference?:
+ export type DynamicRunEvaluatorParams<Input extends Record<string, any> = Record<string, unknown>, Prediction extends Record<string, any> = Record<string, unknown>, Reference extends Record<string, any> = Record<string, unknown>> = {
+ input: Input;
+ prediction?: Prediction;
+ reference?: Reference;
  run: Run;
  example?: Example;
  };
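
With the new generics, a custom evaluator can declare the shapes it expects instead of working with untyped records. A sketch of how that could look, assuming `DynamicRunEvaluatorParams` is re-exported from the `langchain/smith` entrypoint and that the evaluator returns the usual `{ key, score }` result object; the input/prediction/reference shapes below are hypothetical placeholders.

```typescript
import type { DynamicRunEvaluatorParams } from "langchain/smith";

// Hypothetical shapes for a Q&A dataset; substitute the keys your own chain produces.
type QaInput = { question: string };
type QaPrediction = { answer: string };
type QaReference = { expectedAnswer: string };

async function exactMatchEvaluator(
  props: DynamicRunEvaluatorParams<QaInput, QaPrediction, QaReference>
) {
  const { prediction, reference } = props;
  const correct =
    prediction?.answer.trim().toLowerCase() ===
    reference?.expectedAnswer.trim().toLowerCase();
  return { key: "exact_match", score: correct ? 1 : 0 };
}
```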

package/dist/vectorstores/qdrant.cjs
CHANGED
@@ -17,5 +17,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  const entrypoint_deprecation_js_1 = require("../util/entrypoint_deprecation.cjs");
  /* #__PURE__ */ (0, entrypoint_deprecation_js_1.logVersion010MigrationWarning)({
  oldEntrypointName: "vectorstores/qdrant",
+ newEntrypointName: "",
+ newPackageName: "@langchain/qdrant",
  });
  __exportStar(require("@langchain/community/vectorstores/qdrant"), exports);

package/dist/vectorstores/qdrant.js
CHANGED
@@ -1,5 +1,7 @@
  import { logVersion010MigrationWarning } from "../util/entrypoint_deprecation.js";
  /* #__PURE__ */ logVersion010MigrationWarning({
  oldEntrypointName: "vectorstores/qdrant",
+ newEntrypointName: "",
+ newPackageName: "@langchain/qdrant",
  });
  export * from "@langchain/community/vectorstores/qdrant";
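
The extra fields above make the entrypoint warning point at the standalone `@langchain/qdrant` package rather than a replacement entrypoint inside `langchain`. A sketch of the import the warning is steering toward, assuming `@langchain/qdrant` is installed and exposes `QdrantVectorStore` with the same API as the community version:

```typescript
import { QdrantVectorStore } from "@langchain/qdrant";
import { OpenAIEmbeddings } from "@langchain/openai";

// Connect to an existing collection; the URL and collection name are placeholders.
const vectorStore = await QdrantVectorStore.fromExistingCollection(
  new OpenAIEmbeddings(),
  {
    url: process.env.QDRANT_URL,
    collectionName: "my-collection",
  }
);

const results = await vectorStore.similaritySearch("hello", 3);
```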

package/document_loaders/web/browserbase.cjs
ADDED
@@ -0,0 +1 @@
+ module.exports = require('../../dist/document_loaders/web/browserbase.cjs');

package/document_loaders/web/browserbase.d.cts
ADDED
@@ -0,0 +1 @@
+ export * from '../../dist/document_loaders/web/browserbase.js'

package/document_loaders/web/browserbase.d.ts
ADDED
@@ -0,0 +1 @@
+ export * from '../../dist/document_loaders/web/browserbase.js'

package/document_loaders/web/browserbase.js
ADDED
@@ -0,0 +1 @@
+ export * from '../../dist/document_loaders/web/browserbase.js'
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "langchain",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.37",
|
|
4
4
|
"description": "Typescript bindings for langchain",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"engines": {
|
|
@@ -546,6 +546,10 @@
|
|
|
546
546
|
"document_loaders/web/azure_blob_storage_file.js",
|
|
547
547
|
"document_loaders/web/azure_blob_storage_file.d.ts",
|
|
548
548
|
"document_loaders/web/azure_blob_storage_file.d.cts",
|
|
549
|
+
"document_loaders/web/browserbase.cjs",
|
|
550
|
+
"document_loaders/web/browserbase.js",
|
|
551
|
+
"document_loaders/web/browserbase.d.ts",
|
|
552
|
+
"document_loaders/web/browserbase.d.cts",
|
|
549
553
|
"document_loaders/web/cheerio.cjs",
|
|
550
554
|
"document_loaders/web/cheerio.js",
|
|
551
555
|
"document_loaders/web/cheerio.d.ts",
|
|
@@ -1226,6 +1230,7 @@
|
|
|
1226
1230
|
"@aws-sdk/credential-provider-node": "^3.388.0",
|
|
1227
1231
|
"@aws-sdk/types": "^3.357.0",
|
|
1228
1232
|
"@azure/storage-blob": "^12.15.0",
|
|
1233
|
+
"@browserbasehq/sdk": "^1.0.0",
|
|
1229
1234
|
"@cloudflare/workers-types": "^4.20230922.0",
|
|
1230
1235
|
"@faker-js/faker": "^7.6.0",
|
|
1231
1236
|
"@gomomento/sdk": "^1.51.1",
|
|
@@ -1314,6 +1319,7 @@
|
|
|
1314
1319
|
"@aws-sdk/client-sfn": "^3.310.0",
|
|
1315
1320
|
"@aws-sdk/credential-provider-node": "^3.388.0",
|
|
1316
1321
|
"@azure/storage-blob": "^12.15.0",
|
|
1322
|
+
"@browserbasehq/sdk": "*",
|
|
1317
1323
|
"@gomomento/sdk": "^1.51.1",
|
|
1318
1324
|
"@gomomento/sdk-core": "^1.51.1",
|
|
1319
1325
|
"@gomomento/sdk-web": "^1.51.1",
|
|
@@ -1377,6 +1383,9 @@
|
|
|
1377
1383
|
"@azure/storage-blob": {
|
|
1378
1384
|
"optional": true
|
|
1379
1385
|
},
|
|
1386
|
+
"@browserbasehq/sdk": {
|
|
1387
|
+
"optional": true
|
|
1388
|
+
},
|
|
1380
1389
|
"@gomomento/sdk": {
|
|
1381
1390
|
"optional": true
|
|
1382
1391
|
},
|
|
@@ -2763,6 +2772,15 @@
|
|
|
2763
2772
|
"import": "./document_loaders/web/azure_blob_storage_file.js",
|
|
2764
2773
|
"require": "./document_loaders/web/azure_blob_storage_file.cjs"
|
|
2765
2774
|
},
|
|
2775
|
+
"./document_loaders/web/browserbase": {
|
|
2776
|
+
"types": {
|
|
2777
|
+
"import": "./document_loaders/web/browserbase.d.ts",
|
|
2778
|
+
"require": "./document_loaders/web/browserbase.d.cts",
|
|
2779
|
+
"default": "./document_loaders/web/browserbase.d.ts"
|
|
2780
|
+
},
|
|
2781
|
+
"import": "./document_loaders/web/browserbase.js",
|
|
2782
|
+
"require": "./document_loaders/web/browserbase.cjs"
|
|
2783
|
+
},
|
|
2766
2784
|
"./document_loaders/web/cheerio": {
|
|
2767
2785
|
"types": {
|
|
2768
2786
|
"import": "./document_loaders/web/cheerio.d.ts",
|