langchain 0.0.166 → 0.0.168
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +4 -4
- package/chat_models/cloudflare_workersai.cjs +1 -0
- package/chat_models/cloudflare_workersai.d.ts +1 -0
- package/chat_models/cloudflare_workersai.js +1 -0
- package/chat_models/fake.cjs +1 -0
- package/chat_models/fake.d.ts +1 -0
- package/chat_models/fake.js +1 -0
- package/dist/agents/chat/index.cjs +3 -2
- package/dist/agents/chat/index.d.ts +3 -0
- package/dist/agents/chat/index.js +3 -2
- package/dist/chat_models/cloudflare_workersai.cjs +140 -0
- package/dist/chat_models/cloudflare_workersai.d.ts +46 -0
- package/dist/chat_models/cloudflare_workersai.js +136 -0
- package/dist/chat_models/fake.cjs +101 -0
- package/dist/chat_models/fake.d.ts +36 -0
- package/dist/chat_models/fake.js +97 -0
- package/dist/embeddings/bedrock.cjs +43 -22
- package/dist/embeddings/bedrock.d.ts +11 -4
- package/dist/embeddings/bedrock.js +43 -22
- package/dist/llms/cloudflare_workersai.cjs +117 -0
- package/dist/llms/cloudflare_workersai.d.ts +49 -0
- package/dist/llms/cloudflare_workersai.js +113 -0
- package/dist/llms/fake.cjs +82 -0
- package/dist/llms/fake.d.ts +31 -0
- package/dist/llms/fake.js +78 -0
- package/dist/llms/sagemaker_endpoint.cjs +9 -7
- package/dist/llms/sagemaker_endpoint.d.ts +3 -3
- package/dist/llms/sagemaker_endpoint.js +9 -7
- package/dist/llms/yandex.cjs +100 -0
- package/dist/llms/yandex.d.ts +40 -0
- package/dist/llms/yandex.js +96 -0
- package/dist/load/import_constants.cjs +4 -0
- package/dist/load/import_constants.js +4 -0
- package/dist/load/import_map.cjs +8 -2
- package/dist/load/import_map.d.ts +6 -0
- package/dist/load/import_map.js +6 -0
- package/dist/retrievers/multi_vector.d.ts +3 -3
- package/dist/retrievers/parent_document.cjs +6 -16
- package/dist/retrievers/parent_document.d.ts +5 -12
- package/dist/retrievers/parent_document.js +6 -16
- package/dist/schema/storage.d.ts +28 -1
- package/dist/storage/encoder_backed.cjs +14 -2
- package/dist/storage/encoder_backed.d.ts +2 -0
- package/dist/storage/encoder_backed.js +12 -1
- package/dist/storage/in_memory.cjs +1 -1
- package/dist/storage/in_memory.js +1 -1
- package/dist/storage/ioredis.cjs +4 -4
- package/dist/storage/ioredis.js +4 -4
- package/dist/storage/vercel_kv.cjs +146 -0
- package/dist/storage/vercel_kv.d.ts +46 -0
- package/dist/storage/vercel_kv.js +142 -0
- package/dist/stores/doc/in_memory.cjs +13 -0
- package/dist/stores/doc/in_memory.d.ts +6 -1
- package/dist/stores/doc/in_memory.js +13 -0
- package/dist/util/axios-fetch-adapter.cjs +1 -1
- package/dist/util/axios-fetch-adapter.js +1 -1
- package/dist/util/env.cjs +1 -1
- package/dist/util/env.js +1 -1
- package/dist/util/event-source-parse.cjs +1 -1
- package/dist/util/event-source-parse.js +1 -1
- package/dist/vectorstores/cassandra.cjs +4 -2
- package/dist/vectorstores/cassandra.js +4 -2
- package/dist/vectorstores/closevector/common.cjs +128 -0
- package/dist/vectorstores/closevector/common.d.ts +82 -0
- package/dist/vectorstores/closevector/common.js +124 -0
- package/dist/vectorstores/closevector/node.cjs +109 -0
- package/dist/vectorstores/closevector/node.d.ts +83 -0
- package/dist/vectorstores/closevector/node.js +105 -0
- package/dist/vectorstores/closevector/web.cjs +109 -0
- package/dist/vectorstores/closevector/web.d.ts +80 -0
- package/dist/vectorstores/closevector/web.js +105 -0
- package/dist/vectorstores/elasticsearch.cjs +3 -1
- package/dist/vectorstores/elasticsearch.js +3 -1
- package/dist/vectorstores/neo4j_vector.cjs +578 -0
- package/dist/vectorstores/neo4j_vector.d.ts +61 -0
- package/dist/vectorstores/neo4j_vector.js +548 -0
- package/llms/cloudflare_workersai.cjs +1 -0
- package/llms/cloudflare_workersai.d.ts +1 -0
- package/llms/cloudflare_workersai.js +1 -0
- package/llms/fake.cjs +1 -0
- package/llms/fake.d.ts +1 -0
- package/llms/fake.js +1 -0
- package/llms/yandex.cjs +1 -0
- package/llms/yandex.d.ts +1 -0
- package/llms/yandex.js +1 -0
- package/package.json +105 -5
- package/storage/encoder_backed.cjs +1 -0
- package/storage/encoder_backed.d.ts +1 -0
- package/storage/encoder_backed.js +1 -0
- package/storage/vercel_kv.cjs +1 -0
- package/storage/vercel_kv.d.ts +1 -0
- package/storage/vercel_kv.js +1 -0
- package/vectorstores/closevector/node.cjs +1 -0
- package/vectorstores/closevector/node.d.ts +1 -0
- package/vectorstores/closevector/node.js +1 -0
- package/vectorstores/closevector/web.cjs +1 -0
- package/vectorstores/closevector/web.d.ts +1 -0
- package/vectorstores/closevector/web.js +1 -0
- package/vectorstores/neo4j_vector.cjs +1 -0
- package/vectorstores/neo4j_vector.d.ts +1 -0
- package/vectorstores/neo4j_vector.js +1 -0
package/dist/llms/sagemaker_endpoint.cjs
CHANGED
@@ -131,18 +131,18 @@ class SageMakerEndpoint extends base_js_1.LLM {
      * Calls the SageMaker endpoint and retrieves the result.
      * @param {string} prompt The input prompt.
      * @param {this["ParsedCallOptions"]} options Parsed call options.
-     * @param {CallbackManagerForLLMRun}
+     * @param {CallbackManagerForLLMRun} runManager Optional run manager.
      * @returns {Promise<string>} A promise that resolves to the generated string.
      */
     /** @ignore */
-    async _call(prompt, options,
+    async _call(prompt, options, runManager) {
         return this.streaming
-            ? await this.streamingCall(prompt, options)
+            ? await this.streamingCall(prompt, options, runManager)
            : await this.noStreamingCall(prompt, options);
     }
-    async streamingCall(prompt, options) {
+    async streamingCall(prompt, options, runManager) {
         const chunks = [];
-        for await (const chunk of this._streamResponseChunks(prompt, options)) {
+        for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
             chunks.push(chunk.text);
         }
         return chunks.join("");
@@ -168,7 +168,7 @@ class SageMakerEndpoint extends base_js_1.LLM {
      * @param {this["ParsedCallOptions"]} options Parsed call options.
      * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
      */
-    async *_streamResponseChunks(prompt, options) {
+    async *_streamResponseChunks(prompt, options, runManager) {
         const body = await this.contentHandler.transformInput(prompt, this.modelKwargs ?? {});
         const { contentType, accepts } = this.contentHandler;
         const stream = await this.caller.call(() => this.client.send(new client_sagemaker_runtime_1.InvokeEndpointWithResponseStreamCommand({
@@ -183,13 +183,15 @@ class SageMakerEndpoint extends base_js_1.LLM {
         }
         for await (const chunk of stream.Body) {
             if (chunk.PayloadPart && chunk.PayloadPart.Bytes) {
+                const text = await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes);
                 yield new index_js_1.GenerationChunk({
-                    text
+                    text,
                     generationInfo: {
                         ...chunk,
                         response: undefined,
                     },
                 });
+                await runManager?.handleLLMNewToken(text);
             }
             else if (chunk.InternalStreamFailure) {
                 throw new Error(chunk.InternalStreamFailure.message);
package/dist/llms/sagemaker_endpoint.d.ts
CHANGED
@@ -104,11 +104,11 @@ export declare class SageMakerEndpoint extends LLM<BaseLLMCallOptions> {
      * Calls the SageMaker endpoint and retrieves the result.
      * @param {string} prompt The input prompt.
      * @param {this["ParsedCallOptions"]} options Parsed call options.
-     * @param {CallbackManagerForLLMRun}
+     * @param {CallbackManagerForLLMRun} runManager Optional run manager.
      * @returns {Promise<string>} A promise that resolves to the generated string.
      */
     /** @ignore */
-    _call(prompt: string, options: this["ParsedCallOptions"],
+    _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
     private streamingCall;
     private noStreamingCall;
     /**
@@ -117,5 +117,5 @@ export declare class SageMakerEndpoint extends LLM<BaseLLMCallOptions> {
      * @param {this["ParsedCallOptions"]} options Parsed call options.
      * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
      */
-    _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"]): AsyncGenerator<GenerationChunk>;
+    _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
 }
package/dist/llms/sagemaker_endpoint.js
CHANGED
@@ -127,18 +127,18 @@ export class SageMakerEndpoint extends LLM {
      * Calls the SageMaker endpoint and retrieves the result.
      * @param {string} prompt The input prompt.
      * @param {this["ParsedCallOptions"]} options Parsed call options.
-     * @param {CallbackManagerForLLMRun}
+     * @param {CallbackManagerForLLMRun} runManager Optional run manager.
      * @returns {Promise<string>} A promise that resolves to the generated string.
      */
     /** @ignore */
-    async _call(prompt, options,
+    async _call(prompt, options, runManager) {
         return this.streaming
-            ? await this.streamingCall(prompt, options)
+            ? await this.streamingCall(prompt, options, runManager)
            : await this.noStreamingCall(prompt, options);
     }
-    async streamingCall(prompt, options) {
+    async streamingCall(prompt, options, runManager) {
         const chunks = [];
-        for await (const chunk of this._streamResponseChunks(prompt, options)) {
+        for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
             chunks.push(chunk.text);
         }
         return chunks.join("");
@@ -164,7 +164,7 @@ export class SageMakerEndpoint extends LLM {
      * @param {this["ParsedCallOptions"]} options Parsed call options.
      * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
      */
-    async *_streamResponseChunks(prompt, options) {
+    async *_streamResponseChunks(prompt, options, runManager) {
         const body = await this.contentHandler.transformInput(prompt, this.modelKwargs ?? {});
         const { contentType, accepts } = this.contentHandler;
         const stream = await this.caller.call(() => this.client.send(new InvokeEndpointWithResponseStreamCommand({
@@ -179,13 +179,15 @@ export class SageMakerEndpoint extends LLM {
         }
         for await (const chunk of stream.Body) {
             if (chunk.PayloadPart && chunk.PayloadPart.Bytes) {
+                const text = await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes);
                 yield new GenerationChunk({
-                    text
+                    text,
                     generationInfo: {
                         ...chunk,
                         response: undefined,
                     },
                 });
+                await runManager?.handleLLMNewToken(text);
             }
             else if (chunk.InternalStreamFailure) {
                 throw new Error(chunk.InternalStreamFailure.message);
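The substance of the SageMaker changes above is that the optional `runManager` is now threaded from `_call` through `streamingCall` into `_streamResponseChunks`, so per-token callbacks fire while a response streams. A minimal consumer-side sketch, assuming a deployed endpoint; the endpoint name, region, and JSON payload shapes below are placeholders rather than values from this diff, and the content handler must match whatever your model actually expects:

```typescript
import {
  SageMakerEndpoint,
  SageMakerLLMContentHandler,
} from "langchain/llms/sagemaker_endpoint";

// Hypothetical content handler; adapt the payload shapes to your model.
class ExampleContentHandler implements SageMakerLLMContentHandler {
  contentType = "application/json";
  accepts = "application/json";
  async transformInput(prompt: string, modelKwargs: Record<string, unknown>) {
    return new TextEncoder().encode(JSON.stringify({ inputs: prompt, ...modelKwargs }));
  }
  async transformOutput(output: Uint8Array) {
    // When streaming, each PayloadPart's bytes pass through here.
    return new TextDecoder().decode(output);
  }
}

const model = new SageMakerEndpoint({
  endpointName: "my-streaming-endpoint", // placeholder
  streaming: true,
  contentHandler: new ExampleContentHandler(),
  clientOptions: { region: "us-east-1" }, // placeholder
});

// New in this release: handleLLMNewToken fires once per streamed chunk,
// because _streamResponseChunks now calls runManager?.handleLLMNewToken(text).
const output = await model.call("Tell me a joke.", undefined, [
  {
    handleLLMNewToken(token: string) {
      process.stdout.write(token);
    },
  },
]);
```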
package/dist/llms/yandex.cjs
ADDED
@@ -0,0 +1,100 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.YandexGPT = void 0;
+const env_js_1 = require("../util/env.cjs");
+const base_js_1 = require("./base.cjs");
+const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/instruct";
+class YandexGPT extends base_js_1.LLM {
+    static lc_name() {
+        return "Yandex GPT";
+    }
+    get lc_secrets() {
+        return {
+            apiKey: "YC_API_KEY",
+            iamToken: "YC_IAM_TOKEN",
+        };
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.6
+        });
+        Object.defineProperty(this, "maxTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1700
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "general"
+        });
+        Object.defineProperty(this, "apiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "iamToken", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const apiKey = fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("YC_API_KEY");
+        const iamToken = fields?.iamToken ?? (0, env_js_1.getEnvironmentVariable)("YC_IAM_TOKEN");
+        if (apiKey === undefined && iamToken === undefined) {
+            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
+        }
+        this.apiKey = apiKey;
+        this.iamToken = iamToken;
+        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.model = fields?.model ?? this.model;
+    }
+    _llmType() {
+        return "yandexgpt";
+    }
+    /** @ignore */
+    async _call(prompt, options) {
+        // Hit the `generate` endpoint on the `large` model
+        return this.caller.callWithOptions({ signal: options.signal }, async () => {
+            const headers = { "Content-Type": "application/json", Authorization: "" };
+            if (this.apiKey !== undefined) {
+                headers.Authorization = `Api-Key ${this.apiKey}`;
+            }
+            else {
+                headers.Authorization = `Bearer ${this.iamToken}`;
+            }
+            const bodyData = {
+                model: this.model,
+                generationOptions: {
+                    temperature: this.temperature,
+                    maxTokens: this.maxTokens,
+                },
+                requestText: prompt,
+            };
+            try {
+                const response = await fetch(apiUrl, {
+                    method: "POST",
+                    headers,
+                    body: JSON.stringify(bodyData),
+                });
+                if (!response.ok) {
+                    throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
+                }
+                const responseData = await response.json();
+                return responseData.result.alternatives[0].text;
+            }
+            catch (error) {
+                throw new Error(`Failed to fetch ${apiUrl} from YandexGPT ${error}`);
+            }
+        });
+    }
+}
+exports.YandexGPT = YandexGPT;
package/dist/llms/yandex.d.ts
ADDED
@@ -0,0 +1,40 @@
+import { LLM, BaseLLMParams } from "./base.js";
+export interface YandexGPTInputs extends BaseLLMParams {
+    /**
+     * What sampling temperature to use.
+     * Should be a double number between 0 (inclusive) and 1 (inclusive).
+     */
+    temperature?: number;
+    /**
+     * Maximum limit on the total number of tokens
+     * used for both the input prompt and the generated response.
+     */
+    maxTokens?: number;
+    /** Model name to use. */
+    model?: string;
+    /**
+     * Yandex Cloud Api Key for service account
+     * with the `ai.languageModels.user` role.
+     */
+    apiKey?: string;
+    /**
+     * Yandex Cloud IAM token for service account
+     * with the `ai.languageModels.user` role.
+     */
+    iamToken?: string;
+}
+export declare class YandexGPT extends LLM implements YandexGPTInputs {
+    static lc_name(): string;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    temperature: number;
+    maxTokens: number;
+    model: string;
+    apiKey?: string;
+    iamToken?: string;
+    constructor(fields?: YandexGPTInputs);
+    _llmType(): string;
+    /** @ignore */
+    _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+}
package/dist/llms/yandex.js
ADDED
@@ -0,0 +1,96 @@
+import { getEnvironmentVariable } from "../util/env.js";
+import { LLM } from "./base.js";
+const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/instruct";
+export class YandexGPT extends LLM {
+    static lc_name() {
+        return "Yandex GPT";
+    }
+    get lc_secrets() {
+        return {
+            apiKey: "YC_API_KEY",
+            iamToken: "YC_IAM_TOKEN",
+        };
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.6
+        });
+        Object.defineProperty(this, "maxTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1700
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "general"
+        });
+        Object.defineProperty(this, "apiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "iamToken", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
+        const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
+        if (apiKey === undefined && iamToken === undefined) {
+            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
+        }
+        this.apiKey = apiKey;
+        this.iamToken = iamToken;
+        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.model = fields?.model ?? this.model;
+    }
+    _llmType() {
+        return "yandexgpt";
+    }
+    /** @ignore */
+    async _call(prompt, options) {
+        // Hit the `generate` endpoint on the `large` model
+        return this.caller.callWithOptions({ signal: options.signal }, async () => {
+            const headers = { "Content-Type": "application/json", Authorization: "" };
+            if (this.apiKey !== undefined) {
+                headers.Authorization = `Api-Key ${this.apiKey}`;
+            }
+            else {
+                headers.Authorization = `Bearer ${this.iamToken}`;
+            }
+            const bodyData = {
+                model: this.model,
+                generationOptions: {
+                    temperature: this.temperature,
+                    maxTokens: this.maxTokens,
+                },
+                requestText: prompt,
+            };
+            try {
+                const response = await fetch(apiUrl, {
+                    method: "POST",
+                    headers,
+                    body: JSON.stringify(bodyData),
+                });
+                if (!response.ok) {
+                    throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
+                }
+                const responseData = await response.json();
+                return responseData.result.alternatives[0].text;
+            }
+            catch (error) {
+                throw new Error(`Failed to fetch ${apiUrl} from YandexGPT ${error}`);
+            }
+        });
+    }
+}
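Taken together, the three yandex files add a `YandexGPT` LLM: credentials come from `YC_API_KEY` or `YC_IAM_TOKEN` (environment variables or the `apiKey`/`iamToken` constructor fields), and requests go to the v1alpha instruct endpoint. A minimal usage sketch; the field values shown are the defaults from the compiled source above:

```typescript
import { YandexGPT } from "langchain/llms/yandex";

// Assumes YC_API_KEY (or YC_IAM_TOKEN) is set in the environment;
// otherwise pass { apiKey } or { iamToken } to the constructor.
const model = new YandexGPT({
  model: "general", // default
  temperature: 0.6, // default
  maxTokens: 1700, // default
});

const res = await model.call("Translate 'hello world' into Russian.");
console.log(res);
```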
package/dist/load/import_constants.cjs
CHANGED
@@ -43,6 +43,8 @@ exports.optionalImportEntrypoints = [
     "langchain/vectorstores/cassandra",
     "langchain/vectorstores/elasticsearch",
     "langchain/vectorstores/cloudflare_vectorize",
+    "langchain/vectorstores/closevector/web",
+    "langchain/vectorstores/closevector/node",
     "langchain/vectorstores/chroma",
     "langchain/vectorstores/googlevertexai",
     "langchain/vectorstores/hnswlib",
@@ -58,6 +60,7 @@ exports.optionalImportEntrypoints = [
     "langchain/vectorstores/opensearch",
     "langchain/vectorstores/pgvector",
     "langchain/vectorstores/milvus",
+    "langchain/vectorstores/neo4j_vector",
     "langchain/vectorstores/typeorm",
     "langchain/vectorstores/myscale",
     "langchain/vectorstores/redis",
@@ -140,6 +143,7 @@ exports.optionalImportEntrypoints = [
     "langchain/stores/message/planetscale",
     "langchain/stores/message/xata",
     "langchain/storage/ioredis",
+    "langchain/storage/vercel_kv",
     "langchain/graphs/neo4j_graph",
     "langchain/hub",
     "langchain/experimental/multimodal_embeddings/googlevertexai",
package/dist/load/import_constants.js
CHANGED
@@ -40,6 +40,8 @@ export const optionalImportEntrypoints = [
     "langchain/vectorstores/cassandra",
     "langchain/vectorstores/elasticsearch",
     "langchain/vectorstores/cloudflare_vectorize",
+    "langchain/vectorstores/closevector/web",
+    "langchain/vectorstores/closevector/node",
     "langchain/vectorstores/chroma",
     "langchain/vectorstores/googlevertexai",
     "langchain/vectorstores/hnswlib",
@@ -55,6 +57,7 @@ export const optionalImportEntrypoints = [
     "langchain/vectorstores/opensearch",
     "langchain/vectorstores/pgvector",
     "langchain/vectorstores/milvus",
+    "langchain/vectorstores/neo4j_vector",
     "langchain/vectorstores/typeorm",
     "langchain/vectorstores/myscale",
     "langchain/vectorstores/redis",
@@ -137,6 +140,7 @@ export const optionalImportEntrypoints = [
     "langchain/stores/message/planetscale",
     "langchain/stores/message/xata",
     "langchain/storage/ioredis",
+    "langchain/storage/vercel_kv",
     "langchain/graphs/neo4j_graph",
     "langchain/hub",
     "langchain/experimental/multimodal_embeddings/googlevertexai",
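These entrypoints are listed as optional because their peer dependencies may not be installed; when reviving a serialized object that references one, the caller supplies the module rather than having the loader import it unconditionally. A sketch, under the assumption that the loader takes `(text, secretsMap, optionalImportsMap)`; the serialized string is a placeholder:

```typescript
import { load } from "langchain/load";

// Hypothetical serialized object that references an optional entrypoint.
const serialized = "..."; // JSON from a toJSON()/serialization round trip

// Newly registered optional entrypoints, e.g.
// "langchain/vectorstores/neo4j_vector", must be passed in explicitly:
const revived = await load(serialized, {}, {
  "langchain/vectorstores/neo4j_vector": await import(
    "langchain/vectorstores/neo4j_vector"
  ),
});
```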
package/dist/load/import_map.cjs
CHANGED
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
-exports.runnables__remote = exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = void 0;
+exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+exports.runnables__remote = exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = void 0;
 exports.load__serializable = __importStar(require("../load/serializable.cjs"));
 exports.agents = __importStar(require("../agents/index.cjs"));
 exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -43,8 +43,11 @@ exports.llms__base = __importStar(require("../llms/base.cjs"));
 exports.llms__openai = __importStar(require("../llms/openai.cjs"));
 exports.llms__ai21 = __importStar(require("../llms/ai21.cjs"));
 exports.llms__aleph_alpha = __importStar(require("../llms/aleph_alpha.cjs"));
+exports.llms__cloudflare_workersai = __importStar(require("../llms/cloudflare_workersai.cjs"));
 exports.llms__ollama = __importStar(require("../llms/ollama.cjs"));
 exports.llms__fireworks = __importStar(require("../llms/fireworks.cjs"));
+exports.llms__yandex = __importStar(require("../llms/yandex.cjs"));
+exports.llms__fake = __importStar(require("../llms/fake.cjs"));
 exports.prompts = __importStar(require("../prompts/index.cjs"));
 exports.vectorstores__base = __importStar(require("../vectorstores/base.cjs"));
 exports.vectorstores__memory = __importStar(require("../vectorstores/memory.cjs"));
@@ -62,10 +65,12 @@ exports.document_transformers__openai_functions = __importStar(require("../docum
 exports.chat_models__base = __importStar(require("../chat_models/base.cjs"));
 exports.chat_models__openai = __importStar(require("../chat_models/openai.cjs"));
 exports.chat_models__anthropic = __importStar(require("../chat_models/anthropic.cjs"));
+exports.chat_models__cloudflare_workersai = __importStar(require("../chat_models/cloudflare_workersai.cjs"));
 exports.chat_models__fireworks = __importStar(require("../chat_models/fireworks.cjs"));
 exports.chat_models__baiduwenxin = __importStar(require("../chat_models/baiduwenxin.cjs"));
 exports.chat_models__ollama = __importStar(require("../chat_models/ollama.cjs"));
 exports.chat_models__minimax = __importStar(require("../chat_models/minimax.cjs"));
+exports.chat_models__fake = __importStar(require("../chat_models/fake.cjs"));
 exports.schema = __importStar(require("../schema/index.cjs"));
 exports.schema__document = __importStar(require("../schema/document.cjs"));
 exports.schema__output_parser = __importStar(require("../schema/output_parser.cjs"));
@@ -94,6 +99,7 @@ exports.cache = __importStar(require("../cache/index.cjs"));
 exports.stores__doc__in_memory = __importStar(require("../stores/doc/in_memory.cjs"));
 exports.stores__file__in_memory = __importStar(require("../stores/file/in_memory.cjs"));
 exports.stores__message__in_memory = __importStar(require("../stores/message/in_memory.cjs"));
+exports.storage__encoder_backed = __importStar(require("../storage/encoder_backed.cjs"));
 exports.storage__in_memory = __importStar(require("../storage/in_memory.cjs"));
 exports.util__math = __importStar(require("../util/math.cjs"));
 exports.util__time = __importStar(require("../util/time.cjs"));
package/dist/load/import_map.d.ts
CHANGED
@@ -15,8 +15,11 @@ export * as llms__base from "../llms/base.js";
 export * as llms__openai from "../llms/openai.js";
 export * as llms__ai21 from "../llms/ai21.js";
 export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
+export * as llms__cloudflare_workersai from "../llms/cloudflare_workersai.js";
 export * as llms__ollama from "../llms/ollama.js";
 export * as llms__fireworks from "../llms/fireworks.js";
+export * as llms__yandex from "../llms/yandex.js";
+export * as llms__fake from "../llms/fake.js";
 export * as prompts from "../prompts/index.js";
 export * as vectorstores__base from "../vectorstores/base.js";
 export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -34,10 +37,12 @@ export * as document_transformers__openai_functions from "../document_transforme
 export * as chat_models__base from "../chat_models/base.js";
 export * as chat_models__openai from "../chat_models/openai.js";
 export * as chat_models__anthropic from "../chat_models/anthropic.js";
+export * as chat_models__cloudflare_workersai from "../chat_models/cloudflare_workersai.js";
 export * as chat_models__fireworks from "../chat_models/fireworks.js";
 export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
 export * as chat_models__ollama from "../chat_models/ollama.js";
 export * as chat_models__minimax from "../chat_models/minimax.js";
+export * as chat_models__fake from "../chat_models/fake.js";
 export * as schema from "../schema/index.js";
 export * as schema__document from "../schema/document.js";
 export * as schema__output_parser from "../schema/output_parser.js";
@@ -66,6 +71,7 @@ export * as cache from "../cache/index.js";
 export * as stores__doc__in_memory from "../stores/doc/in_memory.js";
 export * as stores__file__in_memory from "../stores/file/in_memory.js";
 export * as stores__message__in_memory from "../stores/message/in_memory.js";
+export * as storage__encoder_backed from "../storage/encoder_backed.js";
 export * as storage__in_memory from "../storage/in_memory.js";
 export * as util__math from "../util/math.js";
 export * as util__time from "../util/time.js";
package/dist/load/import_map.js
CHANGED
@@ -16,8 +16,11 @@ export * as llms__base from "../llms/base.js";
 export * as llms__openai from "../llms/openai.js";
 export * as llms__ai21 from "../llms/ai21.js";
 export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
+export * as llms__cloudflare_workersai from "../llms/cloudflare_workersai.js";
 export * as llms__ollama from "../llms/ollama.js";
 export * as llms__fireworks from "../llms/fireworks.js";
+export * as llms__yandex from "../llms/yandex.js";
+export * as llms__fake from "../llms/fake.js";
 export * as prompts from "../prompts/index.js";
 export * as vectorstores__base from "../vectorstores/base.js";
 export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -35,10 +38,12 @@ export * as document_transformers__openai_functions from "../document_transforme
 export * as chat_models__base from "../chat_models/base.js";
 export * as chat_models__openai from "../chat_models/openai.js";
 export * as chat_models__anthropic from "../chat_models/anthropic.js";
+export * as chat_models__cloudflare_workersai from "../chat_models/cloudflare_workersai.js";
 export * as chat_models__fireworks from "../chat_models/fireworks.js";
 export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
 export * as chat_models__ollama from "../chat_models/ollama.js";
 export * as chat_models__minimax from "../chat_models/minimax.js";
+export * as chat_models__fake from "../chat_models/fake.js";
 export * as schema from "../schema/index.js";
 export * as schema__document from "../schema/document.js";
 export * as schema__output_parser from "../schema/output_parser.js";
@@ -67,6 +72,7 @@ export * as cache from "../cache/index.js";
 export * as stores__doc__in_memory from "../stores/doc/in_memory.js";
 export * as stores__file__in_memory from "../stores/file/in_memory.js";
 export * as stores__message__in_memory from "../stores/message/in_memory.js";
+export * as storage__encoder_backed from "../storage/encoder_backed.js";
 export * as storage__in_memory from "../storage/in_memory.js";
 export * as util__math from "../util/math.js";
 export * as util__time from "../util/time.js";
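Each import-map entry corresponds to a public subpath, so the new modules in this release are importable directly. The class names below are the ones these modules export as published; since not all class bodies appear in this diff, treat them as assumptions:

```typescript
import { YandexGPT } from "langchain/llms/yandex";
import { CloudflareWorkersAI } from "langchain/llms/cloudflare_workersai";
import { ChatCloudflareWorkersAI } from "langchain/chat_models/cloudflare_workersai";
import { EncoderBackedStore } from "langchain/storage/encoder_backed";
```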
package/dist/retrievers/multi_vector.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { BaseStoreInterface } from "../schema/storage.js";
 import { Document } from "../document.js";
 import { BaseRetriever, BaseRetrieverInput } from "../schema/retriever.js";
 import { VectorStore } from "../vectorstores/base.js";
@@ -7,7 +7,7 @@ import { VectorStore } from "../vectorstores/base.js";
  */
 export interface MultiVectorRetrieverInput extends BaseRetrieverInput {
     vectorstore: VectorStore;
-    docstore:
+    docstore: BaseStoreInterface<string, Document>;
     idKey?: string;
     childK?: number;
     parentK?: number;
@@ -21,7 +21,7 @@ export declare class MultiVectorRetriever extends BaseRetriever {
     static lc_name(): string;
     lc_namespace: string[];
     vectorstore: VectorStore;
-    docstore:
+    docstore: BaseStoreInterface<string, Document>;
     protected idKey: string;
     protected childK?: number;
     protected parentK?: number;
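The substantive change here is the `docstore` type: it is now any `BaseStoreInterface<string, Document>` rather than a concrete legacy `Docstore`, so key-value stores such as the in-memory store can back the retriever. A short sketch; the `idKey` value is illustrative, and `InMemoryStore` is assumed to satisfy the interface:

```typescript
import { MultiVectorRetriever } from "langchain/retrievers/multi_vector";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { InMemoryStore } from "langchain/storage/in_memory";
import { Document } from "langchain/document";

const retriever = new MultiVectorRetriever({
  vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),
  // Any BaseStoreInterface<string, Document> implementation works here.
  docstore: new InMemoryStore<Document>(),
  idKey: "doc_id",
});
```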
package/dist/retrievers/parent_document.cjs
CHANGED
@@ -25,9 +25,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ParentDocumentRetriever = void 0;
 const uuid = __importStar(require("uuid"));
-const retriever_js_1 = require("../schema/retriever.cjs");
 const document_js_1 = require("../document.cjs");
-
+const multi_vector_js_1 = require("./multi_vector.cjs");
 /**
  * A type of document retriever that splits input documents into smaller chunks
  * while separately storing and preserving the original documents.
@@ -37,7 +36,7 @@ const document_js_1 = require("../document.cjs");
  * This strikes a balance between better targeted retrieval with small documents
  * and the more context-rich larger documents.
  */
-class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
+class ParentDocumentRetriever extends multi_vector_js_1.MultiVectorRetriever {
     static lc_name() {
         return "ParentDocumentRetriever";
     }
@@ -55,12 +54,6 @@ class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
             writable: true,
             value: void 0
         });
-        Object.defineProperty(this, "docstore", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
         Object.defineProperty(this, "childSplitter", {
             enumerable: true,
             configurable: true,
@@ -109,12 +102,9 @@ class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
             }
         }
         const parentDocs = [];
-
-
-
-                parentDocs.push(parentDoc);
-            }
-        }
+        const storedParentDocs = await this.docstore.mget(parentDocIds);
+        const retrievedDocs = storedParentDocs.filter((doc) => doc !== undefined);
+        parentDocs.push(...retrievedDocs);
         return parentDocs.slice(0, this.parentK);
     }
     /**
@@ -162,7 +152,7 @@ class ParentDocumentRetriever extends retriever_js_1.BaseRetriever {
         }
         await this.vectorstore.addDocuments(embeddedDocs);
         if (addToDocstore) {
-            await this.docstore.
+            await this.docstore.mset(Object.entries(fullDocs));
         }
     }
 }
package/dist/retrievers/parent_document.d.ts
CHANGED
@@ -1,21 +1,15 @@
-import { BaseRetriever, BaseRetrieverInput } from "../schema/retriever.js";
 import { Document } from "../document.js";
 import { VectorStore } from "../vectorstores/base.js";
-import { Docstore } from "../schema/index.js";
 import { TextSplitter } from "../text_splitter.js";
+import { MultiVectorRetriever, type MultiVectorRetrieverInput } from "./multi_vector.js";
 /**
  * Interface for the fields required to initialize a
  * ParentDocumentRetriever instance.
  */
-export interface ParentDocumentRetrieverFields extends BaseRetrieverInput {
-    vectorstore: VectorStore;
-    docstore: Docstore;
+export type ParentDocumentRetrieverFields = MultiVectorRetrieverInput & {
     childSplitter: TextSplitter;
     parentSplitter?: TextSplitter;
-
-    childK?: number;
-    parentK?: number;
-}
+};
 /**
  * A type of document retriever that splits input documents into smaller chunks
  * while separately storing and preserving the original documents.
@@ -25,11 +19,10 @@ export interface ParentDocumentRetrieverFields extends BaseRetrieverInput {
  * This strikes a balance between better targeted retrieval with small documents
  * and the more context-rich larger documents.
  */
-export declare class ParentDocumentRetriever extends
+export declare class ParentDocumentRetriever extends MultiVectorRetriever {
     static lc_name(): string;
     lc_namespace: string[];
-
-    protected docstore: Docstore;
+    vectorstore: VectorStore;
     protected childSplitter: TextSplitter;
     protected parentSplitter?: TextSplitter;
     protected idKey: string;