langchain 0.0.166 → 0.0.167

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/dist/embeddings/bedrock.cjs +43 -22
  2. package/dist/embeddings/bedrock.d.ts +11 -4
  3. package/dist/embeddings/bedrock.js +43 -22
  4. package/dist/llms/yandex.cjs +100 -0
  5. package/dist/llms/yandex.d.ts +40 -0
  6. package/dist/llms/yandex.js +96 -0
  7. package/dist/load/import_constants.cjs +2 -0
  8. package/dist/load/import_constants.js +2 -0
  9. package/dist/load/import_map.cjs +4 -2
  10. package/dist/load/import_map.d.ts +2 -0
  11. package/dist/load/import_map.js +2 -0
  12. package/dist/retrievers/multi_vector.d.ts +3 -3
  13. package/dist/retrievers/parent_document.cjs +6 -16
  14. package/dist/retrievers/parent_document.d.ts +5 -12
  15. package/dist/retrievers/parent_document.js +6 -16
  16. package/dist/schema/storage.d.ts +28 -1
  17. package/dist/storage/encoder_backed.cjs +14 -2
  18. package/dist/storage/encoder_backed.d.ts +2 -0
  19. package/dist/storage/encoder_backed.js +12 -1
  20. package/dist/storage/in_memory.cjs +1 -1
  21. package/dist/storage/in_memory.js +1 -1
  22. package/dist/storage/ioredis.cjs +4 -4
  23. package/dist/storage/ioredis.js +4 -4
  24. package/dist/storage/vercel_kv.cjs +146 -0
  25. package/dist/storage/vercel_kv.d.ts +46 -0
  26. package/dist/storage/vercel_kv.js +142 -0
  27. package/dist/stores/doc/in_memory.cjs +13 -0
  28. package/dist/stores/doc/in_memory.d.ts +6 -1
  29. package/dist/stores/doc/in_memory.js +13 -0
  30. package/dist/vectorstores/cassandra.cjs +4 -2
  31. package/dist/vectorstores/cassandra.js +4 -2
  32. package/dist/vectorstores/elasticsearch.cjs +3 -1
  33. package/dist/vectorstores/elasticsearch.js +3 -1
  34. package/dist/vectorstores/neo4j_vector.cjs +578 -0
  35. package/dist/vectorstores/neo4j_vector.d.ts +61 -0
  36. package/dist/vectorstores/neo4j_vector.js +548 -0
  37. package/llms/yandex.cjs +1 -0
  38. package/llms/yandex.d.ts +1 -0
  39. package/llms/yandex.js +1 -0
  40. package/package.json +38 -1
  41. package/storage/encoder_backed.cjs +1 -0
  42. package/storage/encoder_backed.d.ts +1 -0
  43. package/storage/encoder_backed.js +1 -0
  44. package/storage/vercel_kv.cjs +1 -0
  45. package/storage/vercel_kv.d.ts +1 -0
  46. package/storage/vercel_kv.js +1 -0
  47. package/vectorstores/neo4j_vector.cjs +1 -0
  48. package/vectorstores/neo4j_vector.d.ts +1 -0
  49. package/vectorstores/neo4j_vector.js +1 -0

package/dist/embeddings/bedrock.cjs

@@ -22,6 +22,12 @@ class BedrockEmbeddings extends base_js_1.Embeddings {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "batchSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 512
+        });
         this.model = fields?.model ?? "amazon.titan-embed-text-v1";
         this.client =
             fields?.client ??
@@ -30,24 +36,40 @@ class BedrockEmbeddings extends base_js_1.Embeddings {
                     credentials: fields?.credentials,
                 });
     }
+    /**
+     * Protected method to make a request to the Bedrock API to generate
+     * embeddings. Handles the retry logic and returns the response from the
+     * API.
+     * @param request Request to send to the Bedrock API.
+     * @returns Promise that resolves to the response from the API.
+     */
     async _embedText(text) {
-        // replace newlines, which can negatively affect performance.
-        const cleanedText = text.replace(/\n/g, " ");
-        const res = await this.client.send(new client_bedrock_runtime_1.InvokeModelCommand({
-            modelId: this.model,
-            body: JSON.stringify({
-                inputText: cleanedText,
-            }),
-            contentType: "application/json",
-            accept: "application/json",
-        }));
-        try {
-            const body = new TextDecoder().decode(res.body);
-            return JSON.parse(body).embedding;
-        }
-        catch (e) {
-            throw new Error("An invalid response was returned by Bedrock.");
-        }
+        return this.caller.call(async () => {
+            try {
+                // replace newlines, which can negatively affect performance.
+                const cleanedText = text.replace(/\n/g, " ");
+                const res = await this.client.send(new client_bedrock_runtime_1.InvokeModelCommand({
+                    modelId: this.model,
+                    body: JSON.stringify({
+                        inputText: cleanedText,
+                    }),
+                    contentType: "application/json",
+                    accept: "application/json",
+                }));
+                const body = new TextDecoder().decode(res.body);
+                return JSON.parse(body).embedding;
+            }
+            catch (e) {
+                console.error({
+                    error: e,
+                });
+                // eslint-disable-next-line no-instanceof/no-instanceof
+                if (e instanceof Error) {
+                    throw new Error(`An error occurred while embedding documents with Bedrock: ${e.message}`);
+                }
+                throw new Error("An error occurred while embedding documents with Bedrock");
+            }
+        });
     }
     /**
      * Method that takes a document as input and returns a promise that
@@ -60,13 +82,12 @@ class BedrockEmbeddings extends base_js_1.Embeddings {
         return this.caller.callWithOptions({}, this._embedText.bind(this), document);
     }
     /**
-     * Method that takes an array of documents as input and returns a promise
-     * that resolves to a 2D array of embeddings for each document. It calls
-     * the _embedText method for each document in the array.
-     * @param documents Array of documents for which to generate embeddings.
+     * Method to generate embeddings for an array of texts. Calls _embedText
+     * method which batches and handles retry logic when calling the AWS Bedrock API.
+     * @param documents Array of texts for which to generate embeddings.
     * @returns Promise that resolves to a 2D array of embeddings for each input document.
     */
-    embedDocuments(documents) {
+    async embedDocuments(documents) {
        return Promise.all(documents.map((document) => this._embedText(document)));
    }
 }

package/dist/embeddings/bedrock.d.ts

@@ -26,7 +26,15 @@ export interface BedrockEmbeddingsParams extends EmbeddingsParams {
 export declare class BedrockEmbeddings extends Embeddings implements BedrockEmbeddingsParams {
     model: string;
     client: BedrockRuntimeClient;
+    batchSize: number;
     constructor(fields?: BedrockEmbeddingsParams);
+    /**
+     * Protected method to make a request to the Bedrock API to generate
+     * embeddings. Handles the retry logic and returns the response from the
+     * API.
+     * @param request Request to send to the Bedrock API.
+     * @returns Promise that resolves to the response from the API.
+     */
     protected _embedText(text: string): Promise<number[]>;
     /**
      * Method that takes a document as input and returns a promise that
@@ -37,10 +45,9 @@ export declare class BedrockEmbeddings extends Embeddings implements BedrockEmbe
      */
     embedQuery(document: string): Promise<number[]>;
     /**
-     * Method that takes an array of documents as input and returns a promise
-     * that resolves to a 2D array of embeddings for each document. It calls
-     * the _embedText method for each document in the array.
-     * @param documents Array of documents for which to generate embeddings.
+     * Method to generate embeddings for an array of texts. Calls _embedText
+     * method which batches and handles retry logic when calling the AWS Bedrock API.
+     * @param documents Array of texts for which to generate embeddings.
     * @returns Promise that resolves to a 2D array of embeddings for each input document.
     */
     embedDocuments(documents: string[]): Promise<number[][]>;

package/dist/embeddings/bedrock.js

@@ -19,6 +19,12 @@ export class BedrockEmbeddings extends Embeddings {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "batchSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 512
+        });
         this.model = fields?.model ?? "amazon.titan-embed-text-v1";
         this.client =
             fields?.client ??
@@ -27,24 +33,40 @@ export class BedrockEmbeddings extends Embeddings {
                     credentials: fields?.credentials,
                 });
     }
+    /**
+     * Protected method to make a request to the Bedrock API to generate
+     * embeddings. Handles the retry logic and returns the response from the
+     * API.
+     * @param request Request to send to the Bedrock API.
+     * @returns Promise that resolves to the response from the API.
+     */
     async _embedText(text) {
-        // replace newlines, which can negatively affect performance.
-        const cleanedText = text.replace(/\n/g, " ");
-        const res = await this.client.send(new InvokeModelCommand({
-            modelId: this.model,
-            body: JSON.stringify({
-                inputText: cleanedText,
-            }),
-            contentType: "application/json",
-            accept: "application/json",
-        }));
-        try {
-            const body = new TextDecoder().decode(res.body);
-            return JSON.parse(body).embedding;
-        }
-        catch (e) {
-            throw new Error("An invalid response was returned by Bedrock.");
-        }
+        return this.caller.call(async () => {
+            try {
+                // replace newlines, which can negatively affect performance.
+                const cleanedText = text.replace(/\n/g, " ");
+                const res = await this.client.send(new InvokeModelCommand({
+                    modelId: this.model,
+                    body: JSON.stringify({
+                        inputText: cleanedText,
+                    }),
+                    contentType: "application/json",
+                    accept: "application/json",
+                }));
+                const body = new TextDecoder().decode(res.body);
+                return JSON.parse(body).embedding;
+            }
+            catch (e) {
+                console.error({
+                    error: e,
+                });
+                // eslint-disable-next-line no-instanceof/no-instanceof
+                if (e instanceof Error) {
+                    throw new Error(`An error occurred while embedding documents with Bedrock: ${e.message}`);
+                }
+                throw new Error("An error occurred while embedding documents with Bedrock");
+            }
+        });
     }
     /**
      * Method that takes a document as input and returns a promise that
@@ -57,13 +79,12 @@ export class BedrockEmbeddings extends Embeddings {
         return this.caller.callWithOptions({}, this._embedText.bind(this), document);
     }
     /**
-     * Method that takes an array of documents as input and returns a promise
-     * that resolves to a 2D array of embeddings for each document. It calls
-     * the _embedText method for each document in the array.
-     * @param documents Array of documents for which to generate embeddings.
+     * Method to generate embeddings for an array of texts. Calls _embedText
+     * method which batches and handles retry logic when calling the AWS Bedrock API.
+     * @param documents Array of texts for which to generate embeddings.
     * @returns Promise that resolves to a 2D array of embeddings for each input document.
     */
-    embedDocuments(documents) {
+    async embedDocuments(documents) {
        return Promise.all(documents.map((document) => this._embedText(document)));
    }
 }
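
Taken together, the three bedrock files add a public batchSize field (default 512), route _embedText through the instance's AsyncCaller so requests are retried, and rethrow failures with the underlying Bedrock error message instead of the old generic one. A minimal consumer-side sketch of what that means in practice (not part of the diff; the "langchain/embeddings/bedrock" entrypoint and the maxRetries option are assumed from the package's existing EmbeddingsParams/AsyncCaller conventions):

import { BedrockEmbeddings } from "langchain/embeddings/bedrock";

async function main() {
  const embeddings = new BedrockEmbeddings({
    region: "us-east-1",
    model: "amazon.titan-embed-text-v1", // default shown in the diff
    maxRetries: 2, // assumed: forwarded to the AsyncCaller that now wraps _embedText
  });
  try {
    // embedDocuments is now async and awaits one retry-wrapped call per text.
    const vectors = await embeddings.embedDocuments(["hello", "world"]);
    console.log(vectors.length, vectors[0].length);
  } catch (e) {
    // Failures surface as
    // "An error occurred while embedding documents with Bedrock: <message>".
    console.error(e);
  }
}

main();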

package/dist/llms/yandex.cjs

@@ -0,0 +1,100 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.YandexGPT = void 0;
+const env_js_1 = require("../util/env.cjs");
+const base_js_1 = require("./base.cjs");
+const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/instruct";
+class YandexGPT extends base_js_1.LLM {
+    static lc_name() {
+        return "Yandex GPT";
+    }
+    get lc_secrets() {
+        return {
+            apiKey: "YC_API_KEY",
+            iamToken: "YC_IAM_TOKEN",
+        };
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.6
+        });
+        Object.defineProperty(this, "maxTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1700
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "general"
+        });
+        Object.defineProperty(this, "apiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "iamToken", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const apiKey = fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("YC_API_KEY");
+        const iamToken = fields?.iamToken ?? (0, env_js_1.getEnvironmentVariable)("YC_IAM_TOKEN");
+        if (apiKey === undefined && iamToken === undefined) {
+            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
+        }
+        this.apiKey = apiKey;
+        this.iamToken = iamToken;
+        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.model = fields?.model ?? this.model;
+    }
+    _llmType() {
+        return "yandexgpt";
+    }
+    /** @ignore */
+    async _call(prompt, options) {
+        // Hit the `generate` endpoint on the `large` model
+        return this.caller.callWithOptions({ signal: options.signal }, async () => {
+            const headers = { "Content-Type": "application/json", Authorization: "" };
+            if (this.apiKey !== undefined) {
+                headers.Authorization = `Api-Key ${this.apiKey}`;
+            }
+            else {
+                headers.Authorization = `Bearer ${this.iamToken}`;
+            }
+            const bodyData = {
+                model: this.model,
+                generationOptions: {
+                    temperature: this.temperature,
+                    maxTokens: this.maxTokens,
+                },
+                requestText: prompt,
+            };
+            try {
+                const response = await fetch(apiUrl, {
+                    method: "POST",
+                    headers,
+                    body: JSON.stringify(bodyData),
+                });
+                if (!response.ok) {
+                    throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
+                }
+                const responseData = await response.json();
+                return responseData.result.alternatives[0].text;
+            }
+            catch (error) {
+                throw new Error(`Failed to fetch ${apiUrl} from YandexGPT ${error}`);
+            }
+        });
+    }
+}
+exports.YandexGPT = YandexGPT;

package/dist/llms/yandex.d.ts

@@ -0,0 +1,40 @@
+import { LLM, BaseLLMParams } from "./base.js";
+export interface YandexGPTInputs extends BaseLLMParams {
+    /**
+     * What sampling temperature to use.
+     * Should be a double number between 0 (inclusive) and 1 (inclusive).
+     */
+    temperature?: number;
+    /**
+     * Maximum limit on the total number of tokens
+     * used for both the input prompt and the generated response.
+     */
+    maxTokens?: number;
+    /** Model name to use. */
+    model?: string;
+    /**
+     * Yandex Cloud Api Key for service account
+     * with the `ai.languageModels.user` role.
+     */
+    apiKey?: string;
+    /**
+     * Yandex Cloud IAM token for service account
+     * with the `ai.languageModels.user` role.
+     */
+    iamToken?: string;
+}
+export declare class YandexGPT extends LLM implements YandexGPTInputs {
+    static lc_name(): string;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    temperature: number;
+    maxTokens: number;
+    model: string;
+    apiKey?: string;
+    iamToken?: string;
+    constructor(fields?: YandexGPTInputs);
+    _llmType(): string;
+    /** @ignore */
+    _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+}

package/dist/llms/yandex.js

@@ -0,0 +1,96 @@
+import { getEnvironmentVariable } from "../util/env.js";
+import { LLM } from "./base.js";
+const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/instruct";
+export class YandexGPT extends LLM {
+    static lc_name() {
+        return "Yandex GPT";
+    }
+    get lc_secrets() {
+        return {
+            apiKey: "YC_API_KEY",
+            iamToken: "YC_IAM_TOKEN",
+        };
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.6
+        });
+        Object.defineProperty(this, "maxTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1700
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "general"
+        });
+        Object.defineProperty(this, "apiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "iamToken", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
+        const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
+        if (apiKey === undefined && iamToken === undefined) {
+            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
+        }
+        this.apiKey = apiKey;
+        this.iamToken = iamToken;
+        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.model = fields?.model ?? this.model;
+    }
+    _llmType() {
+        return "yandexgpt";
+    }
+    /** @ignore */
+    async _call(prompt, options) {
+        // Hit the `generate` endpoint on the `large` model
+        return this.caller.callWithOptions({ signal: options.signal }, async () => {
+            const headers = { "Content-Type": "application/json", Authorization: "" };
+            if (this.apiKey !== undefined) {
+                headers.Authorization = `Api-Key ${this.apiKey}`;
+            }
+            else {
+                headers.Authorization = `Bearer ${this.iamToken}`;
+            }
+            const bodyData = {
+                model: this.model,
+                generationOptions: {
+                    temperature: this.temperature,
+                    maxTokens: this.maxTokens,
+                },
+                requestText: prompt,
+            };
+            try {
+                const response = await fetch(apiUrl, {
+                    method: "POST",
+                    headers,
+                    body: JSON.stringify(bodyData),
+                });
+                if (!response.ok) {
+                    throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
+                }
+                const responseData = await response.json();
+                return responseData.result.alternatives[0].text;
+            }
+            catch (error) {
+                throw new Error(`Failed to fetch ${apiUrl} from YandexGPT ${error}`);
+            }
+        });
+    }
+}
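
The three yandex files add a new YandexGPT LLM wrapper around the llm.api.cloud.yandex.net instruct endpoint. A minimal usage sketch (not part of the diff; the "langchain/llms/yandex" entrypoint matches the package files added in this release, and .call() is the standard LLM invocation helper in langchain 0.0.x):

import { YandexGPT } from "langchain/llms/yandex";

async function main() {
  const model = new YandexGPT({
    // Either apiKey or iamToken must be provided, or the
    // YC_API_KEY / YC_IAM_TOKEN environment variables must be set.
    apiKey: process.env.YC_API_KEY,
    temperature: 0.6, // defaults shown in the diff
    maxTokens: 1700,
    model: "general",
  });
  const answer = await model.call("Say hello in Russian.");
  console.log(answer);
}

main();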

package/dist/load/import_constants.cjs

@@ -58,6 +58,7 @@ exports.optionalImportEntrypoints = [
     "langchain/vectorstores/opensearch",
     "langchain/vectorstores/pgvector",
     "langchain/vectorstores/milvus",
+    "langchain/vectorstores/neo4j_vector",
     "langchain/vectorstores/typeorm",
     "langchain/vectorstores/myscale",
     "langchain/vectorstores/redis",
@@ -140,6 +141,7 @@ exports.optionalImportEntrypoints = [
     "langchain/stores/message/planetscale",
     "langchain/stores/message/xata",
     "langchain/storage/ioredis",
+    "langchain/storage/vercel_kv",
     "langchain/graphs/neo4j_graph",
     "langchain/hub",
     "langchain/experimental/multimodal_embeddings/googlevertexai",

package/dist/load/import_constants.js

@@ -55,6 +55,7 @@ export const optionalImportEntrypoints = [
     "langchain/vectorstores/opensearch",
     "langchain/vectorstores/pgvector",
     "langchain/vectorstores/milvus",
+    "langchain/vectorstores/neo4j_vector",
     "langchain/vectorstores/typeorm",
     "langchain/vectorstores/myscale",
     "langchain/vectorstores/redis",
@@ -137,6 +138,7 @@ export const optionalImportEntrypoints = [
     "langchain/stores/message/planetscale",
     "langchain/stores/message/xata",
     "langchain/storage/ioredis",
+    "langchain/storage/vercel_kv",
     "langchain/graphs/neo4j_graph",
     "langchain/hub",
     "langchain/experimental/multimodal_embeddings/googlevertexai",

package/dist/load/import_map.cjs

@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fireworks = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
-exports.runnables__remote = exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = void 0;
+exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+exports.runnables__remote = exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = void 0;
 exports.load__serializable = __importStar(require("../load/serializable.cjs"));
 exports.agents = __importStar(require("../agents/index.cjs"));
 exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -45,6 +45,7 @@ exports.llms__ai21 = __importStar(require("../llms/ai21.cjs"));
 exports.llms__aleph_alpha = __importStar(require("../llms/aleph_alpha.cjs"));
 exports.llms__ollama = __importStar(require("../llms/ollama.cjs"));
 exports.llms__fireworks = __importStar(require("../llms/fireworks.cjs"));
+exports.llms__yandex = __importStar(require("../llms/yandex.cjs"));
 exports.prompts = __importStar(require("../prompts/index.cjs"));
 exports.vectorstores__base = __importStar(require("../vectorstores/base.cjs"));
 exports.vectorstores__memory = __importStar(require("../vectorstores/memory.cjs"));
@@ -94,6 +95,7 @@ exports.cache = __importStar(require("../cache/index.cjs"));
 exports.stores__doc__in_memory = __importStar(require("../stores/doc/in_memory.cjs"));
 exports.stores__file__in_memory = __importStar(require("../stores/file/in_memory.cjs"));
 exports.stores__message__in_memory = __importStar(require("../stores/message/in_memory.cjs"));
+exports.storage__encoder_backed = __importStar(require("../storage/encoder_backed.cjs"));
 exports.storage__in_memory = __importStar(require("../storage/in_memory.cjs"));
 exports.util__math = __importStar(require("../util/math.cjs"));
 exports.util__time = __importStar(require("../util/time.cjs"));

package/dist/load/import_map.d.ts

@@ -17,6 +17,7 @@ export * as llms__ai21 from "../llms/ai21.js";
 export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
 export * as llms__ollama from "../llms/ollama.js";
 export * as llms__fireworks from "../llms/fireworks.js";
+export * as llms__yandex from "../llms/yandex.js";
 export * as prompts from "../prompts/index.js";
 export * as vectorstores__base from "../vectorstores/base.js";
 export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -66,6 +67,7 @@ export * as cache from "../cache/index.js";
 export * as stores__doc__in_memory from "../stores/doc/in_memory.js";
 export * as stores__file__in_memory from "../stores/file/in_memory.js";
 export * as stores__message__in_memory from "../stores/message/in_memory.js";
+export * as storage__encoder_backed from "../storage/encoder_backed.js";
 export * as storage__in_memory from "../storage/in_memory.js";
 export * as util__math from "../util/math.js";
 export * as util__time from "../util/time.js";

package/dist/load/import_map.js

@@ -18,6 +18,7 @@ export * as llms__ai21 from "../llms/ai21.js";
 export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
 export * as llms__ollama from "../llms/ollama.js";
 export * as llms__fireworks from "../llms/fireworks.js";
+export * as llms__yandex from "../llms/yandex.js";
 export * as prompts from "../prompts/index.js";
 export * as vectorstores__base from "../vectorstores/base.js";
 export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -67,6 +68,7 @@ export * as cache from "../cache/index.js";
 export * as stores__doc__in_memory from "../stores/doc/in_memory.js";
 export * as stores__file__in_memory from "../stores/file/in_memory.js";
 export * as stores__message__in_memory from "../stores/message/in_memory.js";
+export * as storage__encoder_backed from "../storage/encoder_backed.js";
 export * as storage__in_memory from "../storage/in_memory.js";
 export * as util__math from "../util/math.js";
 export * as util__time from "../util/time.js";
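
These import_constants/import_map changes register the new modules with the load/serialization machinery: llms/yandex and storage/encoder_backed are added to the eager import map, while vectorstores/neo4j_vector and storage/vercel_kv become optional entrypoints that require their peer dependencies. A sketch of the resulting imports (not part of the diff; the exported class names EncoderBackedStore, VercelKVStore, and Neo4jVectorStore are assumptions based on the package's usual naming and are not shown in this section):

import { YandexGPT } from "langchain/llms/yandex";
import { EncoderBackedStore } from "langchain/storage/encoder_backed";
// Optional entrypoints: these need their peer dependencies installed
// (e.g. @vercel/kv and neo4j-driver, respectively).
import { VercelKVStore } from "langchain/storage/vercel_kv";
import { Neo4jVectorStore } from "langchain/vectorstores/neo4j_vector";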

package/dist/retrievers/multi_vector.d.ts

@@ -1,4 +1,4 @@
-import { BaseStore } from "../schema/storage.js";
+import { BaseStoreInterface } from "../schema/storage.js";
 import { Document } from "../document.js";
 import { BaseRetriever, BaseRetrieverInput } from "../schema/retriever.js";
 import { VectorStore } from "../vectorstores/base.js";
@@ -7,7 +7,7 @@ import { VectorStore } from "../vectorstores/base.js";
  */
 export interface MultiVectorRetrieverInput extends BaseRetrieverInput {
     vectorstore: VectorStore;
-    docstore: BaseStore<string, Document>;
+    docstore: BaseStoreInterface<string, Document>;
     idKey?: string;
     childK?: number;
     parentK?: number;
@@ -21,7 +21,7 @@ export declare class MultiVectorRetriever extends BaseRetriever {
     static lc_name(): string;
     lc_namespace: string[];
     vectorstore: VectorStore;
-    docstore: BaseStore<string, Document>;
+    docstore: BaseStoreInterface<string, Document>;
     protected idKey: string;
     protected childK?: number;
     protected parentK?: number;
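
The docstore for MultiVectorRetriever is now typed as the BaseStoreInterface from schema/storage.js rather than the abstract BaseStore class, so any key-value store exposing the mget/mset style API used elsewhere in this diff can back the retriever. A hedged sketch (not part of the diff; only the mget/mset calls visible in this diff are confirmed, the rest follows the package's existing API):

import { MultiVectorRetriever } from "langchain/retrievers/multi_vector";
import { InMemoryStore } from "langchain/storage/in_memory";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

const retriever = new MultiVectorRetriever({
  vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),
  // InMemoryStore satisfies BaseStoreInterface<string, Document>.
  docstore: new InMemoryStore(),
  idKey: "doc_id",
  childK: 20,
  parentK: 5,
});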

package/dist/retrievers/parent_document.cjs

@@ -25,9 +25,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ParentDocumentRetriever = void 0;
 const uuid = __importStar(require("uuid"));
-const retriever_js_1 = require("../schema/retriever.cjs");
 const document_js_1 = require("../document.cjs");
-// TODO: Change this to subclass MultiVectorRetriever
+const multi_vector_js_1 = require("./multi_vector.cjs");
 /**
  * A type of document retriever that splits input documents into smaller chunks
  * while separately storing and preserving the original documents.
@@ -37,7 +36,7 @@ const document_js_1 = require("../document.cjs");
  * This strikes a balance between better targeted retrieval with small documents
  * and the more context-rich larger documents.
  */
-class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
+class ParentDocumentRetriever extends multi_vector_js_1.MultiVectorRetriever {
     static lc_name() {
         return "ParentDocumentRetriever";
     }
@@ -55,12 +54,6 @@ class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
             writable: true,
             value: void 0
         });
-        Object.defineProperty(this, "docstore", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
         Object.defineProperty(this, "childSplitter", {
             enumerable: true,
             configurable: true,
@@ -109,12 +102,9 @@ class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
             }
         }
         const parentDocs = [];
-        for (const parentDocId of parentDocIds) {
-            const parentDoc = await this.docstore.search(parentDocId);
-            if (parentDoc !== undefined) {
-                parentDocs.push(parentDoc);
-            }
-        }
+        const storedParentDocs = await this.docstore.mget(parentDocIds);
+        const retrievedDocs = storedParentDocs.filter((doc) => doc !== undefined);
+        parentDocs.push(...retrievedDocs);
         return parentDocs.slice(0, this.parentK);
     }
     /**
@@ -162,7 +152,7 @@ class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
             }
         }
         await this.vectorstore.addDocuments(embeddedDocs);
         if (addToDocstore) {
-            await this.docstore.add(fullDocs);
+            await this.docstore.mset(Object.entries(fullDocs));
         }
     }
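
With this change ParentDocumentRetriever extends MultiVectorRetriever, drops its own docstore property, and reads and writes parent documents through the key-value docstore API (mget/mset) instead of docstore.search/docstore.add. A usage sketch (not part of the diff; the constructor fields and text splitter shown follow the package's existing API, and the addToDocstore behaviour is only inferred from the code above):

import { ParentDocumentRetriever } from "langchain/retrievers/parent_document";
import { InMemoryStore } from "langchain/storage/in_memory";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { Document } from "langchain/document";

async function main() {
  const retriever = new ParentDocumentRetriever({
    vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),
    docstore: new InMemoryStore(), // any BaseStoreInterface-compatible store
    childSplitter: new RecursiveCharacterTextSplitter({ chunkSize: 200 }),
  });
  const docs = [new Document({ pageContent: "A long parent document..." })];
  // Child chunks go into the vectorstore; the full parent documents are
  // written to the docstore via mset when addToDocstore is enabled.
  await retriever.addDocuments(docs);
  const relevant = await retriever.getRelevantDocuments("parent document");
  console.log(relevant.length);
}

main();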