langchain 0.0.136 → 0.0.137

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -133,7 +133,7 @@ class RecursiveUrlLoader extends base_js_1.BaseDocumentLoader {
133
133
  };
134
134
  }
135
135
  async getChildUrlsRecursive(inputUrl, visited = new Set(), depth = 0) {
136
- if (depth > this.maxDepth)
136
+ if (depth >= this.maxDepth)
137
137
  return [];
138
138
  let url = inputUrl;
139
139
  if (!inputUrl.endsWith("/"))
@@ -130,7 +130,7 @@ export class RecursiveUrlLoader extends BaseDocumentLoader {
130
130
  };
131
131
  }
132
132
  async getChildUrlsRecursive(inputUrl, visited = new Set(), depth = 0) {
133
- if (depth > this.maxDepth)
133
+ if (depth >= this.maxDepth)
134
134
  return [];
135
135
  let url = inputUrl;
136
136
  if (!inputUrl.endsWith("/"))
@@ -0,0 +1,132 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.LlamaCpp = void 0;
4
+ const node_llama_cpp_1 = require("node-llama-cpp");
5
+ const base_js_1 = require("./base.cjs");
6
+ /**
7
+ * To use this model you need to have the `node-llama-cpp` module installed.
8
+ * This can be installed using `npm install -S node-llama-cpp` and the minimum
9
+ * version supported is version 2.0.0.
10
+ * This also requires that you have a locally built version of Llama2 installed.
11
+ */
12
+ class LlamaCpp extends base_js_1.LLM {
13
+ static lc_name() {
14
+ return "LlamaCpp";
15
+ }
16
+ constructor(inputs) {
17
+ super(inputs);
18
+ Object.defineProperty(this, "batchSize", {
19
+ enumerable: true,
20
+ configurable: true,
21
+ writable: true,
22
+ value: void 0
23
+ });
24
+ Object.defineProperty(this, "contextSize", {
25
+ enumerable: true,
26
+ configurable: true,
27
+ writable: true,
28
+ value: void 0
29
+ });
30
+ Object.defineProperty(this, "embedding", {
31
+ enumerable: true,
32
+ configurable: true,
33
+ writable: true,
34
+ value: void 0
35
+ });
36
+ Object.defineProperty(this, "f16Kv", {
37
+ enumerable: true,
38
+ configurable: true,
39
+ writable: true,
40
+ value: void 0
41
+ });
42
+ Object.defineProperty(this, "gpuLayers", {
43
+ enumerable: true,
44
+ configurable: true,
45
+ writable: true,
46
+ value: void 0
47
+ });
48
+ Object.defineProperty(this, "logitsAll", {
49
+ enumerable: true,
50
+ configurable: true,
51
+ writable: true,
52
+ value: void 0
53
+ });
54
+ Object.defineProperty(this, "lowVram", {
55
+ enumerable: true,
56
+ configurable: true,
57
+ writable: true,
58
+ value: void 0
59
+ });
60
+ Object.defineProperty(this, "seed", {
61
+ enumerable: true,
62
+ configurable: true,
63
+ writable: true,
64
+ value: void 0
65
+ });
66
+ Object.defineProperty(this, "useMlock", {
67
+ enumerable: true,
68
+ configurable: true,
69
+ writable: true,
70
+ value: void 0
71
+ });
72
+ Object.defineProperty(this, "useMmap", {
73
+ enumerable: true,
74
+ configurable: true,
75
+ writable: true,
76
+ value: void 0
77
+ });
78
+ Object.defineProperty(this, "vocabOnly", {
79
+ enumerable: true,
80
+ configurable: true,
81
+ writable: true,
82
+ value: void 0
83
+ });
84
+ Object.defineProperty(this, "modelPath", {
85
+ enumerable: true,
86
+ configurable: true,
87
+ writable: true,
88
+ value: void 0
89
+ });
90
+ Object.defineProperty(this, "_model", {
91
+ enumerable: true,
92
+ configurable: true,
93
+ writable: true,
94
+ value: void 0
95
+ });
96
+ Object.defineProperty(this, "_context", {
97
+ enumerable: true,
98
+ configurable: true,
99
+ writable: true,
100
+ value: void 0
101
+ });
102
+ this.batchSize = inputs.batchSize;
103
+ this.contextSize = inputs.contextSize;
104
+ this.embedding = inputs.embedding;
105
+ this.f16Kv = inputs.f16Kv;
106
+ this.gpuLayers = inputs.gpuLayers;
107
+ this.logitsAll = inputs.logitsAll;
108
+ this.lowVram = inputs.lowVram;
109
+ this.modelPath = inputs.modelPath;
110
+ this.seed = inputs.seed;
111
+ this.useMlock = inputs.useMlock;
112
+ this.useMmap = inputs.useMmap;
113
+ this.vocabOnly = inputs.vocabOnly;
114
+ this._model = new node_llama_cpp_1.LlamaModel(inputs);
115
+ this._context = new node_llama_cpp_1.LlamaContext({ model: this._model });
116
+ }
117
+ _llmType() {
118
+ return "llama2_cpp";
119
+ }
120
+ /** @ignore */
121
+ async _call(prompt, options) {
122
+ const session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
123
+ try {
124
+ const compleation = await session.prompt(prompt, options);
125
+ return compleation;
126
+ }
127
+ catch (e) {
128
+ throw new Error("Error getting prompt compleation.");
129
+ }
130
+ }
131
+ }
132
+ exports.LlamaCpp = LlamaCpp;
@@ -0,0 +1,73 @@
1
+ import { LlamaModel, LlamaContext } from "node-llama-cpp";
2
+ import { LLM, BaseLLMCallOptions, BaseLLMParams } from "./base.js";
3
+ /**
4
+ * Note that the modelPath is the only required parameter. For testing you
5
+ * can set this in the environment variable `LLAMA_PATH`.
6
+ */
7
+ export interface LlamaCppInputs extends BaseLLMParams {
8
+ /** Prompt processing batch size. */
9
+ batchSize?: number;
10
+ /** Text context size. */
11
+ contextSize?: number;
12
+ /** Embedding mode only. */
13
+ embedding?: boolean;
14
+ /** Use fp16 for KV cache. */
15
+ f16Kv?: boolean;
16
+ /** Number of layers to store in VRAM. */
17
+ gpuLayers?: number;
18
+ /** The llama_eval() call computes all logits, not just the last one. */
19
+ logitsAll?: boolean;
20
+ /** If true, reduce VRAM usage at the cost of performance. */
21
+ lowVram?: boolean;
22
+ /** Path to the model on the filesystem. */
23
+ modelPath: string;
24
+ /** If null, a random seed will be used. */
25
+ seed?: null | number;
26
+ /** The randomness of the responses, e.g. 0.1 deterministic, 1.5 creative, 0.8 balanced, 0 disables. */
27
+ temperature?: number;
28
+ /** Consider the n most likely tokens, where n is 1 to vocabulary size, 0 disables (uses full vocabulary). Note: only applies when `temperature` > 0. */
29
+ topK?: number;
30
+ /** Selects the smallest token set whose probability exceeds P, where P is between 0 - 1, 1 disables. Note: only applies when `temperature` > 0. */
31
+ topP?: number;
32
+ /** Force system to keep model in RAM. */
33
+ useMlock?: boolean;
34
+ /** Use mmap if possible. */
35
+ useMmap?: boolean;
36
+ /** Only load the vocabulary, no weights. */
37
+ vocabOnly?: boolean;
38
+ }
39
+ export interface LlamaCppCallOptions extends BaseLLMCallOptions {
40
+ /** The maximum number of tokens the response should contain. */
41
+ maxTokens?: number;
42
+ /** A function called when matching the provided token array */
43
+ onToken?: (tokens: number[]) => void;
44
+ }
45
+ /**
46
+ * To use this model you need to have the `node-llama-cpp` module installed.
47
+ * This can be installed using `npm install -S node-llama-cpp` and the minimum
48
+ * version supported is version 2.0.0.
49
+ * This also requires that you have a locally built version of Llama2 installed.
50
+ */
51
+ export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
52
+ CallOptions: LlamaCppCallOptions;
53
+ static inputs: LlamaCppInputs;
54
+ batchSize?: number;
55
+ contextSize?: number;
56
+ embedding?: boolean;
57
+ f16Kv?: boolean;
58
+ gpuLayers?: number;
59
+ logitsAll?: boolean;
60
+ lowVram?: boolean;
61
+ seed?: null | number;
62
+ useMlock?: boolean;
63
+ useMmap?: boolean;
64
+ vocabOnly?: boolean;
65
+ modelPath: string;
66
+ _model: LlamaModel;
67
+ _context: LlamaContext;
68
+ static lc_name(): string;
69
+ constructor(inputs: LlamaCppInputs);
70
+ _llmType(): string;
71
+ /** @ignore */
72
+ _call(prompt: string, options?: this["ParsedCallOptions"]): Promise<string>;
73
+ }
@@ -0,0 +1,128 @@
1
+ import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
2
+ import { LLM } from "./base.js";
3
+ /**
4
+ * To use this model you need to have the `node-llama-cpp` module installed.
5
+ * This can be installed using `npm install -S node-llama-cpp` and the minimum
6
+ * version supported is version 2.0.0.
7
+ * This also requires that you have a locally built version of Llama2 installed.
8
+ */
9
+ export class LlamaCpp extends LLM {
10
+ static lc_name() {
11
+ return "LlamaCpp";
12
+ }
13
+ constructor(inputs) {
14
+ super(inputs);
15
+ Object.defineProperty(this, "batchSize", {
16
+ enumerable: true,
17
+ configurable: true,
18
+ writable: true,
19
+ value: void 0
20
+ });
21
+ Object.defineProperty(this, "contextSize", {
22
+ enumerable: true,
23
+ configurable: true,
24
+ writable: true,
25
+ value: void 0
26
+ });
27
+ Object.defineProperty(this, "embedding", {
28
+ enumerable: true,
29
+ configurable: true,
30
+ writable: true,
31
+ value: void 0
32
+ });
33
+ Object.defineProperty(this, "f16Kv", {
34
+ enumerable: true,
35
+ configurable: true,
36
+ writable: true,
37
+ value: void 0
38
+ });
39
+ Object.defineProperty(this, "gpuLayers", {
40
+ enumerable: true,
41
+ configurable: true,
42
+ writable: true,
43
+ value: void 0
44
+ });
45
+ Object.defineProperty(this, "logitsAll", {
46
+ enumerable: true,
47
+ configurable: true,
48
+ writable: true,
49
+ value: void 0
50
+ });
51
+ Object.defineProperty(this, "lowVram", {
52
+ enumerable: true,
53
+ configurable: true,
54
+ writable: true,
55
+ value: void 0
56
+ });
57
+ Object.defineProperty(this, "seed", {
58
+ enumerable: true,
59
+ configurable: true,
60
+ writable: true,
61
+ value: void 0
62
+ });
63
+ Object.defineProperty(this, "useMlock", {
64
+ enumerable: true,
65
+ configurable: true,
66
+ writable: true,
67
+ value: void 0
68
+ });
69
+ Object.defineProperty(this, "useMmap", {
70
+ enumerable: true,
71
+ configurable: true,
72
+ writable: true,
73
+ value: void 0
74
+ });
75
+ Object.defineProperty(this, "vocabOnly", {
76
+ enumerable: true,
77
+ configurable: true,
78
+ writable: true,
79
+ value: void 0
80
+ });
81
+ Object.defineProperty(this, "modelPath", {
82
+ enumerable: true,
83
+ configurable: true,
84
+ writable: true,
85
+ value: void 0
86
+ });
87
+ Object.defineProperty(this, "_model", {
88
+ enumerable: true,
89
+ configurable: true,
90
+ writable: true,
91
+ value: void 0
92
+ });
93
+ Object.defineProperty(this, "_context", {
94
+ enumerable: true,
95
+ configurable: true,
96
+ writable: true,
97
+ value: void 0
98
+ });
99
+ this.batchSize = inputs.batchSize;
100
+ this.contextSize = inputs.contextSize;
101
+ this.embedding = inputs.embedding;
102
+ this.f16Kv = inputs.f16Kv;
103
+ this.gpuLayers = inputs.gpuLayers;
104
+ this.logitsAll = inputs.logitsAll;
105
+ this.lowVram = inputs.lowVram;
106
+ this.modelPath = inputs.modelPath;
107
+ this.seed = inputs.seed;
108
+ this.useMlock = inputs.useMlock;
109
+ this.useMmap = inputs.useMmap;
110
+ this.vocabOnly = inputs.vocabOnly;
111
+ this._model = new LlamaModel(inputs);
112
+ this._context = new LlamaContext({ model: this._model });
113
+ }
114
+ _llmType() {
115
+ return "llama2_cpp";
116
+ }
117
+ /** @ignore */
118
+ async _call(prompt, options) {
119
+ const session = new LlamaChatSession({ context: this._context });
120
+ try {
121
+ const compleation = await session.prompt(prompt, options);
122
+ return compleation;
123
+ }
124
+ catch (e) {
125
+ throw new Error("Error getting prompt compleation.");
126
+ }
127
+ }
128
+ }
@@ -29,6 +29,7 @@ exports.optionalImportEntrypoints = [
29
29
  "langchain/llms/googlepalm",
30
30
  "langchain/llms/sagemaker_endpoint",
31
31
  "langchain/llms/bedrock",
32
+ "langchain/llms/llama_cpp",
32
33
  "langchain/llms/writer",
33
34
  "langchain/prompts/load",
34
35
  "langchain/vectorstores/analyticdb",
@@ -26,6 +26,7 @@ export const optionalImportEntrypoints = [
26
26
  "langchain/llms/googlepalm",
27
27
  "langchain/llms/sagemaker_endpoint",
28
28
  "langchain/llms/bedrock",
29
+ "langchain/llms/llama_cpp",
29
30
  "langchain/llms/writer",
30
31
  "langchain/prompts/load",
31
32
  "langchain/vectorstores/analyticdb",
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
24
24
  return result;
25
25
  };
26
26
  Object.defineProperty(exports, "__esModule", { value: true });
27
- exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__openai = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
28
- exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = void 0;
27
+ exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__openai = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
28
+ exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = void 0;
29
29
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
30
30
  exports.agents = __importStar(require("../agents/index.cjs"));
31
31
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -72,6 +72,7 @@ exports.retrievers__remote = __importStar(require("../retrievers/remote/index.cj
72
72
  exports.retrievers__databerry = __importStar(require("../retrievers/databerry.cjs"));
73
73
  exports.retrievers__contextual_compression = __importStar(require("../retrievers/contextual_compression.cjs"));
74
74
  exports.retrievers__document_compressors = __importStar(require("../retrievers/document_compressors/index.cjs"));
75
+ exports.retrievers__multi_vector = __importStar(require("../retrievers/multi_vector.cjs"));
75
76
  exports.retrievers__parent_document = __importStar(require("../retrievers/parent_document.cjs"));
76
77
  exports.retrievers__time_weighted = __importStar(require("../retrievers/time_weighted.cjs"));
77
78
  exports.retrievers__document_compressors__chain_extract = __importStar(require("../retrievers/document_compressors/chain_extract.cjs"));
@@ -44,6 +44,7 @@ export * as retrievers__remote from "../retrievers/remote/index.js";
44
44
  export * as retrievers__databerry from "../retrievers/databerry.js";
45
45
  export * as retrievers__contextual_compression from "../retrievers/contextual_compression.js";
46
46
  export * as retrievers__document_compressors from "../retrievers/document_compressors/index.js";
47
+ export * as retrievers__multi_vector from "../retrievers/multi_vector.js";
47
48
  export * as retrievers__parent_document from "../retrievers/parent_document.js";
48
49
  export * as retrievers__time_weighted from "../retrievers/time_weighted.js";
49
50
  export * as retrievers__document_compressors__chain_extract from "../retrievers/document_compressors/chain_extract.js";
@@ -45,6 +45,7 @@ export * as retrievers__remote from "../retrievers/remote/index.js";
45
45
  export * as retrievers__databerry from "../retrievers/databerry.js";
46
46
  export * as retrievers__contextual_compression from "../retrievers/contextual_compression.js";
47
47
  export * as retrievers__document_compressors from "../retrievers/document_compressors/index.js";
48
+ export * as retrievers__multi_vector from "../retrievers/multi_vector.js";
48
49
  export * as retrievers__parent_document from "../retrievers/parent_document.js";
49
50
  export * as retrievers__time_weighted from "../retrievers/time_weighted.js";
50
51
  export * as retrievers__document_compressors__chain_extract from "../retrievers/document_compressors/chain_extract.js";
@@ -0,0 +1,72 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.MultiVectorRetriever = void 0;
4
+ const retriever_js_1 = require("../schema/retriever.cjs");
5
+ /**
6
+ * A retriever that retrieves documents from a vector store and a document
7
+ * store. It uses the vector store to find relevant documents based on a
8
+ * query, and then retrieves the full documents from the document store.
9
+ */
10
+ class MultiVectorRetriever extends retriever_js_1.BaseRetriever {
11
+ static lc_name() {
12
+ return "MultiVectorRetriever";
13
+ }
14
+ constructor(args) {
15
+ super(args);
16
+ Object.defineProperty(this, "lc_namespace", {
17
+ enumerable: true,
18
+ configurable: true,
19
+ writable: true,
20
+ value: ["langchain", "retrievers", "multi_vector"]
21
+ });
22
+ Object.defineProperty(this, "vectorstore", {
23
+ enumerable: true,
24
+ configurable: true,
25
+ writable: true,
26
+ value: void 0
27
+ });
28
+ Object.defineProperty(this, "docstore", {
29
+ enumerable: true,
30
+ configurable: true,
31
+ writable: true,
32
+ value: void 0
33
+ });
34
+ Object.defineProperty(this, "idKey", {
35
+ enumerable: true,
36
+ configurable: true,
37
+ writable: true,
38
+ value: void 0
39
+ });
40
+ Object.defineProperty(this, "childK", {
41
+ enumerable: true,
42
+ configurable: true,
43
+ writable: true,
44
+ value: void 0
45
+ });
46
+ Object.defineProperty(this, "parentK", {
47
+ enumerable: true,
48
+ configurable: true,
49
+ writable: true,
50
+ value: void 0
51
+ });
52
+ this.vectorstore = args.vectorstore;
53
+ this.docstore = args.docstore;
54
+ this.idKey = args.idKey ?? "doc_id";
55
+ this.childK = args.childK;
56
+ this.parentK = args.parentK;
57
+ }
58
+ async _getRelevantDocuments(query) {
59
+ const subDocs = await this.vectorstore.similaritySearch(query, this.childK);
60
+ const ids = [];
61
+ for (const doc of subDocs) {
62
+ if (doc.metadata[this.idKey] && !ids.includes(doc.metadata[this.idKey])) {
63
+ ids.push(doc.metadata[this.idKey]);
64
+ }
65
+ }
66
+ const docs = await this.docstore.mget(ids);
67
+ return docs
68
+ .filter((doc) => doc !== undefined)
69
+ .slice(0, this.parentK);
70
+ }
71
+ }
72
+ exports.MultiVectorRetriever = MultiVectorRetriever;
@@ -0,0 +1,30 @@
1
+ import { BaseStore } from "../schema/storage.js";
2
+ import { Document } from "../document.js";
3
+ import { BaseRetriever, BaseRetrieverInput } from "../schema/retriever.js";
4
+ import { VectorStore } from "../vectorstores/base.js";
5
+ /**
6
+ * Arguments for the MultiVectorRetriever class.
7
+ */
8
+ export interface MultiVectorRetrieverInput extends BaseRetrieverInput {
9
+ vectorstore: VectorStore;
10
+ docstore: BaseStore<string, Document>;
11
+ idKey?: string;
12
+ childK?: number;
13
+ parentK?: number;
14
+ }
15
+ /**
16
+ * A retriever that retrieves documents from a vector store and a document
17
+ * store. It uses the vector store to find relevant documents based on a
18
+ * query, and then retrieves the full documents from the document store.
19
+ */
20
+ export declare class MultiVectorRetriever extends BaseRetriever {
21
+ static lc_name(): string;
22
+ lc_namespace: string[];
23
+ vectorstore: VectorStore;
24
+ docstore: BaseStore<string, Document>;
25
+ protected idKey: string;
26
+ protected childK?: number;
27
+ protected parentK?: number;
28
+ constructor(args: MultiVectorRetrieverInput);
29
+ _getRelevantDocuments(query: string): Promise<Document[]>;
30
+ }
@@ -0,0 +1,68 @@
1
+ import { BaseRetriever } from "../schema/retriever.js";
2
+ /**
3
+ * A retriever that retrieves documents from a vector store and a document
4
+ * store. It uses the vector store to find relevant documents based on a
5
+ * query, and then retrieves the full documents from the document store.
6
+ */
7
+ export class MultiVectorRetriever extends BaseRetriever {
8
+ static lc_name() {
9
+ return "MultiVectorRetriever";
10
+ }
11
+ constructor(args) {
12
+ super(args);
13
+ Object.defineProperty(this, "lc_namespace", {
14
+ enumerable: true,
15
+ configurable: true,
16
+ writable: true,
17
+ value: ["langchain", "retrievers", "multi_vector"]
18
+ });
19
+ Object.defineProperty(this, "vectorstore", {
20
+ enumerable: true,
21
+ configurable: true,
22
+ writable: true,
23
+ value: void 0
24
+ });
25
+ Object.defineProperty(this, "docstore", {
26
+ enumerable: true,
27
+ configurable: true,
28
+ writable: true,
29
+ value: void 0
30
+ });
31
+ Object.defineProperty(this, "idKey", {
32
+ enumerable: true,
33
+ configurable: true,
34
+ writable: true,
35
+ value: void 0
36
+ });
37
+ Object.defineProperty(this, "childK", {
38
+ enumerable: true,
39
+ configurable: true,
40
+ writable: true,
41
+ value: void 0
42
+ });
43
+ Object.defineProperty(this, "parentK", {
44
+ enumerable: true,
45
+ configurable: true,
46
+ writable: true,
47
+ value: void 0
48
+ });
49
+ this.vectorstore = args.vectorstore;
50
+ this.docstore = args.docstore;
51
+ this.idKey = args.idKey ?? "doc_id";
52
+ this.childK = args.childK;
53
+ this.parentK = args.parentK;
54
+ }
55
+ async _getRelevantDocuments(query) {
56
+ const subDocs = await this.vectorstore.similaritySearch(query, this.childK);
57
+ const ids = [];
58
+ for (const doc of subDocs) {
59
+ if (doc.metadata[this.idKey] && !ids.includes(doc.metadata[this.idKey])) {
60
+ ids.push(doc.metadata[this.idKey]);
61
+ }
62
+ }
63
+ const docs = await this.docstore.mget(ids);
64
+ return docs
65
+ .filter((doc) => doc !== undefined)
66
+ .slice(0, this.parentK);
67
+ }
68
+ }
@@ -27,6 +27,7 @@ exports.ParentDocumentRetriever = void 0;
27
27
  const uuid = __importStar(require("uuid"));
28
28
  const retriever_js_1 = require("../schema/retriever.cjs");
29
29
  const document_js_1 = require("../document.cjs");
30
+ // TODO: Change this to subclass MultiVectorRetriever
30
31
  /**
31
32
  * A type of document retriever that splits input documents into smaller chunks
32
33
  * while separately storing and preserving the original documents.
@@ -1,6 +1,7 @@
1
1
  import * as uuid from "uuid";
2
2
  import { BaseRetriever } from "../schema/retriever.js";
3
3
  import { Document } from "../document.js";
4
+ // TODO: Change this to subclass MultiVectorRetriever
4
5
  /**
5
6
  * A type of document retriever that splits input documents into smaller chunks
6
7
  * while separately storing and preserving the original documents.
@@ -6,10 +6,7 @@ const runnable_js_1 = require("./runnable.cjs");
6
6
  /**
7
7
  * Abstract base class for a Document retrieval system. A retrieval system
8
8
  * is defined as something that can take string queries and return the
9
- * most 'relevant' Documents from some source. It extends the `Runnable`
10
- * class, which means it is a unit of work that can be invoked, batched,
11
- * streamed, or transformed. In the context of `BaseRetriever`, it is
12
- * invoked with a string input and returns an array of `Document` objects.
9
+ * most 'relevant' Documents from some source.
13
10
  */
14
11
  class BaseRetriever extends runnable_js_1.Runnable {
15
12
  constructor(fields) {
@@ -2,7 +2,7 @@ import { BaseCallbackConfig, CallbackManagerForRetrieverRun, Callbacks } from ".
2
2
  import { Document } from "../document.js";
3
3
  import { Runnable, RunnableConfig } from "./runnable.js";
4
4
  /**
5
- * Base Index class. All indexes should extend this class.
5
+ * Base Retriever class. All retrievers should extend this class.
6
6
  */
7
7
  export interface BaseRetrieverInput {
8
8
  callbacks?: Callbacks;
@@ -13,10 +13,7 @@ export interface BaseRetrieverInput {
13
13
  /**
14
14
  * Abstract base class for a Document retrieval system. A retrieval system
15
15
  * is defined as something that can take string queries and return the
16
- * most 'relevant' Documents from some source. It extends the `Runnable`
17
- * class, which means it is a unit of work that can be invoked, batched,
18
- * streamed, or transformed. In the context of `BaseRetriever`, it is
19
- * invoked with a string input and returns an array of `Document` objects.
16
+ * most 'relevant' Documents from some source.
20
17
  */
21
18
  export declare abstract class BaseRetriever extends Runnable<string, Document[]> {
22
19
  callbacks?: Callbacks;
@@ -3,10 +3,7 @@ import { Runnable } from "./runnable.js";
3
3
  /**
4
4
  * Abstract base class for a Document retrieval system. A retrieval system
5
5
  * is defined as something that can take string queries and return the
6
- * most 'relevant' Documents from some source. It extends the `Runnable`
7
- * class, which means it is a unit of work that can be invoked, batched,
8
- * streamed, or transformed. In the context of `BaseRetriever`, it is
9
- * invoked with a string input and returns an array of `Document` objects.
6
+ * most 'relevant' Documents from some source.
10
7
  */
11
8
  export class BaseRetriever extends Runnable {
12
9
  constructor(fields) {
@@ -0,0 +1 @@
1
+ module.exports = require('../dist/llms/llama_cpp.cjs');
@@ -0,0 +1 @@
1
+ export * from '../dist/llms/llama_cpp.js'
@@ -0,0 +1 @@
1
+ export * from '../dist/llms/llama_cpp.js'
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "langchain",
3
- "version": "0.0.136",
3
+ "version": "0.0.137",
4
4
  "description": "Typescript bindings for langchain",
5
5
  "type": "module",
6
6
  "engines": {
@@ -145,6 +145,9 @@
145
145
  "llms/bedrock.cjs",
146
146
  "llms/bedrock.js",
147
147
  "llms/bedrock.d.ts",
148
+ "llms/llama_cpp.cjs",
149
+ "llms/llama_cpp.js",
150
+ "llms/llama_cpp.d.ts",
148
151
  "llms/writer.cjs",
149
152
  "llms/writer.js",
150
153
  "llms/writer.d.ts",
@@ -448,6 +451,9 @@
448
451
  "retrievers/document_compressors.cjs",
449
452
  "retrievers/document_compressors.js",
450
453
  "retrievers/document_compressors.d.ts",
454
+ "retrievers/multi_vector.cjs",
455
+ "retrievers/multi_vector.js",
456
+ "retrievers/multi_vector.d.ts",
451
457
  "retrievers/parent_document.cjs",
452
458
  "retrievers/parent_document.js",
453
459
  "retrievers/parent_document.d.ts",
@@ -687,6 +693,7 @@
687
693
  "ml-matrix": "^6.10.4",
688
694
  "mongodb": "^5.2.0",
689
695
  "mysql2": "^3.3.3",
696
+ "node-llama-cpp": "^2.1.2",
690
697
  "notion-to-md": "^3.1.0",
691
698
  "pdf-parse": "1.1.1",
692
699
  "peggy": "^3.0.2",
@@ -772,6 +779,7 @@
772
779
  "mammoth": "*",
773
780
  "mongodb": "^5.2.0",
774
781
  "mysql2": "^3.3.3",
782
+ "node-llama-cpp": "*",
775
783
  "notion-to-md": "^3.1.0",
776
784
  "pdf-parse": "1.1.1",
777
785
  "peggy": "^3.0.2",
@@ -964,6 +972,9 @@
964
972
  "mysql2": {
965
973
  "optional": true
966
974
  },
975
+ "node-llama-cpp": {
976
+ "optional": true
977
+ },
967
978
  "notion-to-md": {
968
979
  "optional": true
969
980
  },
@@ -1297,6 +1308,11 @@
1297
1308
  "import": "./llms/bedrock.js",
1298
1309
  "require": "./llms/bedrock.cjs"
1299
1310
  },
1311
+ "./llms/llama_cpp": {
1312
+ "types": "./llms/llama_cpp.d.ts",
1313
+ "import": "./llms/llama_cpp.js",
1314
+ "require": "./llms/llama_cpp.cjs"
1315
+ },
1300
1316
  "./llms/writer": {
1301
1317
  "types": "./llms/writer.d.ts",
1302
1318
  "import": "./llms/writer.js",
@@ -1810,6 +1826,11 @@
1810
1826
  "import": "./retrievers/document_compressors.js",
1811
1827
  "require": "./retrievers/document_compressors.cjs"
1812
1828
  },
1829
+ "./retrievers/multi_vector": {
1830
+ "types": "./retrievers/multi_vector.d.ts",
1831
+ "import": "./retrievers/multi_vector.js",
1832
+ "require": "./retrievers/multi_vector.cjs"
1833
+ },
1813
1834
  "./retrievers/parent_document": {
1814
1835
  "types": "./retrievers/parent_document.d.ts",
1815
1836
  "import": "./retrievers/parent_document.js",
@@ -0,0 +1 @@
1
+ module.exports = require('../dist/retrievers/multi_vector.cjs');
@@ -0,0 +1 @@
1
+ export * from '../dist/retrievers/multi_vector.js'
@@ -0,0 +1 @@
1
+ export * from '../dist/retrievers/multi_vector.js'