langchain 0.0.167 → 0.0.169

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/README.md +4 -4
  2. package/chat_models/cloudflare_workersai.cjs +1 -0
  3. package/chat_models/cloudflare_workersai.d.ts +1 -0
  4. package/chat_models/cloudflare_workersai.js +1 -0
  5. package/chat_models/fake.cjs +1 -0
  6. package/chat_models/fake.d.ts +1 -0
  7. package/chat_models/fake.js +1 -0
  8. package/chat_models/yandex.cjs +1 -0
  9. package/chat_models/yandex.d.ts +1 -0
  10. package/chat_models/yandex.js +1 -0
  11. package/dist/agents/chat/index.cjs +3 -2
  12. package/dist/agents/chat/index.d.ts +3 -0
  13. package/dist/agents/chat/index.js +3 -2
  14. package/dist/callbacks/handlers/llmonitor.cjs +21 -17
  15. package/dist/callbacks/handlers/llmonitor.js +21 -17
  16. package/dist/chat_models/cloudflare_workersai.cjs +145 -0
  17. package/dist/chat_models/cloudflare_workersai.d.ts +46 -0
  18. package/dist/chat_models/cloudflare_workersai.js +141 -0
  19. package/dist/chat_models/fake.cjs +101 -0
  20. package/dist/chat_models/fake.d.ts +36 -0
  21. package/dist/chat_models/fake.js +97 -0
  22. package/dist/chat_models/yandex.cjs +117 -0
  23. package/dist/chat_models/yandex.d.ts +16 -0
  24. package/dist/chat_models/yandex.js +113 -0
  25. package/dist/evaluation/comparison/prompt.d.ts +2 -2
  26. package/dist/experimental/chains/violation_of_expectations/index.cjs +5 -0
  27. package/dist/experimental/chains/violation_of_expectations/index.d.ts +1 -0
  28. package/dist/experimental/chains/violation_of_expectations/index.js +1 -0
  29. package/dist/experimental/chains/violation_of_expectations/types.cjs +49 -0
  30. package/dist/experimental/chains/violation_of_expectations/types.d.ts +69 -0
  31. package/dist/experimental/chains/violation_of_expectations/types.js +46 -0
  32. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +328 -0
  33. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.d.ts +148 -0
  34. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +324 -0
  35. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.cjs +49 -0
  36. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.d.ts +5 -0
  37. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.js +46 -0
  38. package/dist/llms/cloudflare_workersai.cjs +124 -0
  39. package/dist/llms/cloudflare_workersai.d.ts +49 -0
  40. package/dist/llms/cloudflare_workersai.js +120 -0
  41. package/dist/llms/fake.cjs +82 -0
  42. package/dist/llms/fake.d.ts +31 -0
  43. package/dist/llms/fake.js +78 -0
  44. package/dist/llms/sagemaker_endpoint.cjs +9 -7
  45. package/dist/llms/sagemaker_endpoint.d.ts +3 -3
  46. package/dist/llms/sagemaker_endpoint.js +9 -7
  47. package/dist/load/import_constants.cjs +2 -0
  48. package/dist/load/import_constants.js +2 -0
  49. package/dist/load/import_map.cjs +8 -2
  50. package/dist/load/import_map.d.ts +6 -0
  51. package/dist/load/import_map.js +6 -0
  52. package/dist/retrievers/zep.cjs +29 -3
  53. package/dist/retrievers/zep.d.ts +14 -0
  54. package/dist/retrievers/zep.js +29 -3
  55. package/dist/util/axios-fetch-adapter.cjs +1 -1
  56. package/dist/util/axios-fetch-adapter.js +1 -1
  57. package/dist/util/env.cjs +1 -1
  58. package/dist/util/env.js +1 -1
  59. package/dist/util/event-source-parse.cjs +1 -1
  60. package/dist/util/event-source-parse.js +1 -1
  61. package/dist/vectorstores/closevector/common.cjs +128 -0
  62. package/dist/vectorstores/closevector/common.d.ts +82 -0
  63. package/dist/vectorstores/closevector/common.js +124 -0
  64. package/dist/vectorstores/closevector/node.cjs +109 -0
  65. package/dist/vectorstores/closevector/node.d.ts +83 -0
  66. package/dist/vectorstores/closevector/node.js +105 -0
  67. package/dist/vectorstores/closevector/web.cjs +109 -0
  68. package/dist/vectorstores/closevector/web.d.ts +80 -0
  69. package/dist/vectorstores/closevector/web.js +105 -0
  70. package/dist/vectorstores/faiss.cjs +38 -6
  71. package/dist/vectorstores/faiss.d.ts +14 -2
  72. package/dist/vectorstores/faiss.js +38 -6
  73. package/dist/vectorstores/weaviate.cjs +13 -2
  74. package/dist/vectorstores/weaviate.js +13 -2
  75. package/experimental/chains/violation_of_expectations.cjs +1 -0
  76. package/experimental/chains/violation_of_expectations.d.ts +1 -0
  77. package/experimental/chains/violation_of_expectations.js +1 -0
  78. package/llms/cloudflare_workersai.cjs +1 -0
  79. package/llms/cloudflare_workersai.d.ts +1 -0
  80. package/llms/cloudflare_workersai.js +1 -0
  81. package/llms/fake.cjs +1 -0
  82. package/llms/fake.d.ts +1 -0
  83. package/llms/fake.js +1 -0
  84. package/package.json +92 -13
  85. package/vectorstores/closevector/node.cjs +1 -0
  86. package/vectorstores/closevector/node.d.ts +1 -0
  87. package/vectorstores/closevector/node.js +1 -0
  88. package/vectorstores/closevector/web.cjs +1 -0
  89. package/vectorstores/closevector/web.d.ts +1 -0
  90. package/vectorstores/closevector/web.js +1 -0
package/dist/llms/cloudflare_workersai.js
@@ -0,0 +1,120 @@
+ import { LLM } from "./base.js";
+ import { getEnvironmentVariable } from "../util/env.js";
+ /**
+  * Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
+  * Language Model) class, providing a standard interface for interacting
+  * with the CloudflareWorkersAI language model.
+  */
+ export class CloudflareWorkersAI extends LLM {
+     static lc_name() {
+         return "CloudflareWorkersAI";
+     }
+     constructor(fields) {
+         super(fields ?? {});
+         Object.defineProperty(this, "model", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "@cf/meta/llama-2-7b-chat-int8"
+         });
+         Object.defineProperty(this, "cloudflareAccountId", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "cloudflareApiToken", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "baseUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: true
+         });
+         this.model = fields?.model ?? this.model;
+         this.cloudflareAccountId =
+             fields?.cloudflareAccountId ??
+                 getEnvironmentVariable("CLOUDFLARE_ACCOUNT_ID");
+         this.cloudflareApiToken =
+             fields?.cloudflareApiToken ??
+                 getEnvironmentVariable("CLOUDFLARE_API_TOKEN");
+         this.baseUrl =
+             fields?.baseUrl ??
+                 `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
+         if (this.baseUrl.endsWith("/")) {
+             this.baseUrl = this.baseUrl.slice(0, -1);
+         }
+     }
+     /**
+      * Method to validate the environment.
+      */
+     validateEnvironment() {
+         if (this.baseUrl === undefined) {
+             if (!this.cloudflareAccountId) {
+                 throw new Error(`No Cloudflare account ID found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_ACCOUNT_ID" in your environment variables.`);
+             }
+             if (!this.cloudflareApiToken) {
+                 throw new Error(`No Cloudflare API key found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_API_KEY" in your environment variables.`);
+             }
+         }
+     }
+     /** Get the identifying parameters for this LLM. */
+     get identifyingParams() {
+         return { model: this.model };
+     }
+     /**
+      * Get the parameters used to invoke the model
+      */
+     invocationParams() {
+         return {
+             model: this.model,
+         };
+     }
+     /** Get the type of LLM. */
+     _llmType() {
+         return "cloudflare";
+     }
+     /** Call out to CloudflareWorkersAI's complete endpoint.
+      Args:
+          prompt: The prompt to pass into the model.
+      Returns:
+          The string generated by the model.
+      Example:
+          let response = CloudflareWorkersAI.call("Tell me a joke.");
+      */
+     async _call(prompt, options) {
+         this.validateEnvironment();
+         const url = `${this.baseUrl}/${this.model}`;
+         const headers = {
+             Authorization: `Bearer ${this.cloudflareApiToken}`,
+             "Content-Type": "application/json",
+         };
+         const data = { prompt };
+         const responseData = await this.caller.call(async () => {
+             const response = await fetch(url, {
+                 method: "POST",
+                 headers,
+                 body: JSON.stringify(data),
+                 signal: options.signal,
+             });
+             if (!response.ok) {
+                 const error = new Error(`Cloudflare LLM call failed with status code ${response.status}`);
+                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                 error.response = response;
+                 throw error;
+             }
+             return response.json();
+         });
+         return responseData.result.response;
+     }
+ }
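
For context, here is how the new entrypoint might be exercised once 0.0.169 is installed — a minimal sketch, assuming valid Cloudflare Workers AI credentials; the account ID and token values are placeholders, and the model name shown is just the class default from the code above:

```typescript
import { CloudflareWorkersAI } from "langchain/llms/cloudflare_workersai";

// Credentials can also come from the CLOUDFLARE_ACCOUNT_ID and
// CLOUDFLARE_API_TOKEN environment variables read in the constructor above.
const model = new CloudflareWorkersAI({
  model: "@cf/meta/llama-2-7b-chat-int8", // the class default, shown explicitly
  cloudflareAccountId: "my-account-id", // placeholder
  cloudflareApiToken: "my-api-token", // placeholder
});

// _call POSTs { prompt } to `${baseUrl}/${model}` and returns
// responseData.result.response as a plain string.
const response = await model.call("Tell me a joke.");
console.log(response);
```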
package/dist/llms/fake.cjs
@@ -0,0 +1,82 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.FakeListLLM = void 0;
+ const base_js_1 = require("./base.cjs");
+ const index_js_1 = require("../schema/index.cjs");
+ /**
+  * A fake LLM that returns a predefined list of responses. It can be used for
+  * testing purposes.
+  */
+ class FakeListLLM extends base_js_1.LLM {
+     static lc_name() {
+         return "FakeListLLM";
+     }
+     constructor({ responses, sleep }) {
+         super({});
+         Object.defineProperty(this, "responses", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "i", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 0
+         });
+         Object.defineProperty(this, "sleep", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.responses = responses;
+         this.sleep = sleep;
+     }
+     _llmType() {
+         return "fake-list";
+     }
+     async _call(_prompt, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         await this._sleepIfRequested();
+         return response;
+     }
+     _currentResponse() {
+         return this.responses[this.i];
+     }
+     _incrementResponse() {
+         if (this.i < this.responses.length - 1) {
+             this.i += 1;
+         }
+         else {
+             this.i = 0;
+         }
+     }
+     async *_streamResponseChunks(_input, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         for await (const text of response) {
+             await this._sleepIfRequested();
+             yield this._createResponseChunk(text);
+         }
+     }
+     async _sleepIfRequested() {
+         if (this.sleep !== undefined) {
+             await this._sleep();
+         }
+     }
+     async _sleep() {
+         return new Promise((resolve) => {
+             setTimeout(() => resolve(), this.sleep);
+         });
+     }
+     _createResponseChunk(text) {
+         return new index_js_1.GenerationChunk({
+             text,
+             generationInfo: {},
+         });
+     }
+ }
+ exports.FakeListLLM = FakeListLLM;
package/dist/llms/fake.d.ts
@@ -0,0 +1,31 @@
+ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
+ import { LLM, BaseLLMParams } from "./base.js";
+ import { GenerationChunk } from "../schema/index.js";
+ /**
+  * Interface for the input parameters specific to the Fake List model.
+  */
+ export interface FakeListInput extends BaseLLMParams {
+     /** Responses to return */
+     responses: string[];
+     /** Time to sleep in milliseconds between responses */
+     sleep?: number;
+ }
+ /**
+  * A fake LLM that returns a predefined list of responses. It can be used for
+  * testing purposes.
+  */
+ export declare class FakeListLLM extends LLM {
+     static lc_name(): string;
+     responses: string[];
+     i: number;
+     sleep?: number;
+     constructor({ responses, sleep }: FakeListInput);
+     _llmType(): string;
+     _call(_prompt: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<string>;
+     _currentResponse(): string;
+     _incrementResponse(): void;
+     _streamResponseChunks(_input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
+     _sleepIfRequested(): Promise<void>;
+     _sleep(): Promise<void>;
+     _createResponseChunk(text: string): GenerationChunk;
+ }
package/dist/llms/fake.js
@@ -0,0 +1,78 @@
+ import { LLM } from "./base.js";
+ import { GenerationChunk } from "../schema/index.js";
+ /**
+  * A fake LLM that returns a predefined list of responses. It can be used for
+  * testing purposes.
+  */
+ export class FakeListLLM extends LLM {
+     static lc_name() {
+         return "FakeListLLM";
+     }
+     constructor({ responses, sleep }) {
+         super({});
+         Object.defineProperty(this, "responses", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "i", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 0
+         });
+         Object.defineProperty(this, "sleep", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.responses = responses;
+         this.sleep = sleep;
+     }
+     _llmType() {
+         return "fake-list";
+     }
+     async _call(_prompt, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         await this._sleepIfRequested();
+         return response;
+     }
+     _currentResponse() {
+         return this.responses[this.i];
+     }
+     _incrementResponse() {
+         if (this.i < this.responses.length - 1) {
+             this.i += 1;
+         }
+         else {
+             this.i = 0;
+         }
+     }
+     async *_streamResponseChunks(_input, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         for await (const text of response) {
+             await this._sleepIfRequested();
+             yield this._createResponseChunk(text);
+         }
+     }
+     async _sleepIfRequested() {
+         if (this.sleep !== undefined) {
+             await this._sleep();
+         }
+     }
+     async _sleep() {
+         return new Promise((resolve) => {
+             setTimeout(() => resolve(), this.sleep);
+         });
+     }
+     _createResponseChunk(text) {
+         return new GenerationChunk({
+             text,
+             generationInfo: {},
+         });
+     }
+ }
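
The three fake.* builds above (CJS, type declarations, ESM) all ship the same `FakeListLLM`. A minimal test-style sketch of its behavior, assuming the `langchain/llms/fake` entrypoint registered elsewhere in this release; note from `_incrementResponse` that the index wraps back to 0 after the last response:

```typescript
import { FakeListLLM } from "langchain/llms/fake";

const llm = new FakeListLLM({
  responses: ["first canned answer", "second canned answer"],
  sleep: 10, // optional delay in milliseconds before each response
});

// Responses are returned in order and cycle once the list is exhausted.
console.log(await llm.call("any prompt")); // "first canned answer"
console.log(await llm.call("any prompt")); // "second canned answer"
console.log(await llm.call("any prompt")); // "first canned answer" again
```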
package/dist/llms/sagemaker_endpoint.cjs
@@ -131,18 +131,18 @@ class SageMakerEndpoint extends base_js_1.LLM {
       * Calls the SageMaker endpoint and retrieves the result.
       * @param {string} prompt The input prompt.
       * @param {this["ParsedCallOptions"]} options Parsed call options.
-      * @param {CallbackManagerForLLMRun} _runManager Optional run manager.
+      * @param {CallbackManagerForLLMRun} runManager Optional run manager.
       * @returns {Promise<string>} A promise that resolves to the generated string.
       */
      /** @ignore */
-     async _call(prompt, options, _runManager) {
+     async _call(prompt, options, runManager) {
          return this.streaming
-             ? await this.streamingCall(prompt, options)
+             ? await this.streamingCall(prompt, options, runManager)
              : await this.noStreamingCall(prompt, options);
      }
-     async streamingCall(prompt, options) {
+     async streamingCall(prompt, options, runManager) {
          const chunks = [];
-         for await (const chunk of this._streamResponseChunks(prompt, options)) {
+         for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
              chunks.push(chunk.text);
          }
          return chunks.join("");
@@ -168,7 +168,7 @@ class SageMakerEndpoint extends base_js_1.LLM {
       * @param {this["ParsedCallOptions"]} options Parsed call options.
       * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
       */
-     async *_streamResponseChunks(prompt, options) {
+     async *_streamResponseChunks(prompt, options, runManager) {
          const body = await this.contentHandler.transformInput(prompt, this.modelKwargs ?? {});
          const { contentType, accepts } = this.contentHandler;
          const stream = await this.caller.call(() => this.client.send(new client_sagemaker_runtime_1.InvokeEndpointWithResponseStreamCommand({
@@ -183,13 +183,15 @@ class SageMakerEndpoint extends base_js_1.LLM {
          }
          for await (const chunk of stream.Body) {
              if (chunk.PayloadPart && chunk.PayloadPart.Bytes) {
+                 const text = await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes);
                  yield new index_js_1.GenerationChunk({
-                     text: await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes),
+                     text,
                      generationInfo: {
                          ...chunk,
                          response: undefined,
                      },
                  });
+                 await runManager?.handleLLMNewToken(text);
              }
              else if (chunk.InternalStreamFailure) {
                  throw new Error(chunk.InternalStreamFailure.message);
package/dist/llms/sagemaker_endpoint.d.ts
@@ -104,11 +104,11 @@ export declare class SageMakerEndpoint extends LLM<BaseLLMCallOptions> {
       * Calls the SageMaker endpoint and retrieves the result.
       * @param {string} prompt The input prompt.
       * @param {this["ParsedCallOptions"]} options Parsed call options.
-      * @param {CallbackManagerForLLMRun} _runManager Optional run manager.
+      * @param {CallbackManagerForLLMRun} runManager Optional run manager.
       * @returns {Promise<string>} A promise that resolves to the generated string.
       */
      /** @ignore */
-     _call(prompt: string, options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<string>;
+     _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
      private streamingCall;
      private noStreamingCall;
      /**
@@ -117,5 +117,5 @@ export declare class SageMakerEndpoint extends LLM<BaseLLMCallOptions> {
       * @param {this["ParsedCallOptions"]} options Parsed call options.
       * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
       */
-     _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"]): AsyncGenerator<GenerationChunk>;
+     _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
  }
package/dist/llms/sagemaker_endpoint.js
@@ -127,18 +127,18 @@ export class SageMakerEndpoint extends LLM {
       * Calls the SageMaker endpoint and retrieves the result.
       * @param {string} prompt The input prompt.
       * @param {this["ParsedCallOptions"]} options Parsed call options.
-      * @param {CallbackManagerForLLMRun} _runManager Optional run manager.
+      * @param {CallbackManagerForLLMRun} runManager Optional run manager.
       * @returns {Promise<string>} A promise that resolves to the generated string.
       */
      /** @ignore */
-     async _call(prompt, options, _runManager) {
+     async _call(prompt, options, runManager) {
          return this.streaming
-             ? await this.streamingCall(prompt, options)
+             ? await this.streamingCall(prompt, options, runManager)
              : await this.noStreamingCall(prompt, options);
      }
-     async streamingCall(prompt, options) {
+     async streamingCall(prompt, options, runManager) {
          const chunks = [];
-         for await (const chunk of this._streamResponseChunks(prompt, options)) {
+         for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
              chunks.push(chunk.text);
          }
          return chunks.join("");
@@ -164,7 +164,7 @@ export class SageMakerEndpoint extends LLM {
       * @param {this["ParsedCallOptions"]} options Parsed call options.
       * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
       */
-     async *_streamResponseChunks(prompt, options) {
+     async *_streamResponseChunks(prompt, options, runManager) {
          const body = await this.contentHandler.transformInput(prompt, this.modelKwargs ?? {});
          const { contentType, accepts } = this.contentHandler;
          const stream = await this.caller.call(() => this.client.send(new InvokeEndpointWithResponseStreamCommand({
@@ -179,13 +179,15 @@ export class SageMakerEndpoint extends LLM {
          }
          for await (const chunk of stream.Body) {
              if (chunk.PayloadPart && chunk.PayloadPart.Bytes) {
+                 const text = await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes);
                  yield new GenerationChunk({
-                     text: await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes),
+                     text,
                      generationInfo: {
                          ...chunk,
                          response: undefined,
                      },
                  });
+                 await runManager?.handleLLMNewToken(text);
              }
              else if (chunk.InternalStreamFailure) {
                  throw new Error(chunk.InternalStreamFailure.message);
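
The net effect of the three SageMaker hunks (CJS, `.d.ts`, ESM) is that the optional run manager is now threaded from `_call` through `streamingCall` into `_streamResponseChunks`, so `handleLLMNewToken` fires once per decoded payload chunk. A hedged sketch of observing those tokens through a callback handler — `model` stands in for a `SageMakerEndpoint` configured with `streaming: true`, a content handler, and client credentials, all omitted here:

```typescript
// `model` is assumed to be a configured SageMakerEndpoint instance
// with streaming enabled; setup is omitted in this sketch.
const output = await model.call("Summarize this document.", {
  callbacks: [
    {
      // As of this release, invoked once per streamed payload chunk.
      handleLLMNewToken(token: string) {
        process.stdout.write(token);
      },
    },
  ],
});
```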
package/dist/load/import_constants.cjs
@@ -43,6 +43,8 @@ exports.optionalImportEntrypoints = [
      "langchain/vectorstores/cassandra",
      "langchain/vectorstores/elasticsearch",
      "langchain/vectorstores/cloudflare_vectorize",
+     "langchain/vectorstores/closevector/web",
+     "langchain/vectorstores/closevector/node",
      "langchain/vectorstores/chroma",
      "langchain/vectorstores/googlevertexai",
      "langchain/vectorstores/hnswlib",
package/dist/load/import_constants.js
@@ -40,6 +40,8 @@ export const optionalImportEntrypoints = [
      "langchain/vectorstores/cassandra",
      "langchain/vectorstores/elasticsearch",
      "langchain/vectorstores/cloudflare_vectorize",
+     "langchain/vectorstores/closevector/web",
+     "langchain/vectorstores/closevector/node",
      "langchain/vectorstores/chroma",
      "langchain/vectorstores/googlevertexai",
      "langchain/vectorstores/hnswlib",
package/dist/load/import_map.cjs
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
      return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.runnables__remote = exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = void 0;
+ exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+ exports.runnables__remote = exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -43,9 +43,11 @@ exports.llms__base = __importStar(require("../llms/base.cjs"));
  exports.llms__openai = __importStar(require("../llms/openai.cjs"));
  exports.llms__ai21 = __importStar(require("../llms/ai21.cjs"));
  exports.llms__aleph_alpha = __importStar(require("../llms/aleph_alpha.cjs"));
+ exports.llms__cloudflare_workersai = __importStar(require("../llms/cloudflare_workersai.cjs"));
  exports.llms__ollama = __importStar(require("../llms/ollama.cjs"));
  exports.llms__fireworks = __importStar(require("../llms/fireworks.cjs"));
  exports.llms__yandex = __importStar(require("../llms/yandex.cjs"));
+ exports.llms__fake = __importStar(require("../llms/fake.cjs"));
  exports.prompts = __importStar(require("../prompts/index.cjs"));
  exports.vectorstores__base = __importStar(require("../vectorstores/base.cjs"));
  exports.vectorstores__memory = __importStar(require("../vectorstores/memory.cjs"));
@@ -63,10 +65,13 @@ exports.document_transformers__openai_functions = __importStar(require("../docum
  exports.chat_models__base = __importStar(require("../chat_models/base.cjs"));
  exports.chat_models__openai = __importStar(require("../chat_models/openai.cjs"));
  exports.chat_models__anthropic = __importStar(require("../chat_models/anthropic.cjs"));
+ exports.chat_models__cloudflare_workersai = __importStar(require("../chat_models/cloudflare_workersai.cjs"));
  exports.chat_models__fireworks = __importStar(require("../chat_models/fireworks.cjs"));
  exports.chat_models__baiduwenxin = __importStar(require("../chat_models/baiduwenxin.cjs"));
  exports.chat_models__ollama = __importStar(require("../chat_models/ollama.cjs"));
  exports.chat_models__minimax = __importStar(require("../chat_models/minimax.cjs"));
+ exports.chat_models__yandex = __importStar(require("../chat_models/yandex.cjs"));
+ exports.chat_models__fake = __importStar(require("../chat_models/fake.cjs"));
  exports.schema = __importStar(require("../schema/index.cjs"));
  exports.schema__document = __importStar(require("../schema/document.cjs"));
  exports.schema__output_parser = __importStar(require("../schema/output_parser.cjs"));
@@ -104,5 +109,6 @@ exports.experimental__babyagi = __importStar(require("../experimental/babyagi/in
  exports.experimental__generative_agents = __importStar(require("../experimental/generative_agents/index.cjs"));
  exports.experimental__plan_and_execute = __importStar(require("../experimental/plan_and_execute/index.cjs"));
  exports.experimental__chat_models__bittensor = __importStar(require("../experimental/chat_models/bittensor.cjs"));
+ exports.experimental__chains__violation_of_expectations = __importStar(require("../experimental/chains/violation_of_expectations/index.cjs"));
  exports.evaluation = __importStar(require("../evaluation/index.cjs"));
  exports.runnables__remote = __importStar(require("../runnables/remote.cjs"));
package/dist/load/import_map.d.ts
@@ -15,9 +15,11 @@ export * as llms__base from "../llms/base.js";
  export * as llms__openai from "../llms/openai.js";
  export * as llms__ai21 from "../llms/ai21.js";
  export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
+ export * as llms__cloudflare_workersai from "../llms/cloudflare_workersai.js";
  export * as llms__ollama from "../llms/ollama.js";
  export * as llms__fireworks from "../llms/fireworks.js";
  export * as llms__yandex from "../llms/yandex.js";
+ export * as llms__fake from "../llms/fake.js";
  export * as prompts from "../prompts/index.js";
  export * as vectorstores__base from "../vectorstores/base.js";
  export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -35,10 +37,13 @@ export * as document_transformers__openai_functions from "../document_transforme
  export * as chat_models__base from "../chat_models/base.js";
  export * as chat_models__openai from "../chat_models/openai.js";
  export * as chat_models__anthropic from "../chat_models/anthropic.js";
+ export * as chat_models__cloudflare_workersai from "../chat_models/cloudflare_workersai.js";
  export * as chat_models__fireworks from "../chat_models/fireworks.js";
  export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
  export * as chat_models__ollama from "../chat_models/ollama.js";
  export * as chat_models__minimax from "../chat_models/minimax.js";
+ export * as chat_models__yandex from "../chat_models/yandex.js";
+ export * as chat_models__fake from "../chat_models/fake.js";
  export * as schema from "../schema/index.js";
  export * as schema__document from "../schema/document.js";
  export * as schema__output_parser from "../schema/output_parser.js";
@@ -76,5 +81,6 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
  export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+ export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
  export * as evaluation from "../evaluation/index.js";
  export * as runnables__remote from "../runnables/remote.js";
package/dist/load/import_map.js
@@ -16,9 +16,11 @@ export * as llms__base from "../llms/base.js";
  export * as llms__openai from "../llms/openai.js";
  export * as llms__ai21 from "../llms/ai21.js";
  export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
+ export * as llms__cloudflare_workersai from "../llms/cloudflare_workersai.js";
  export * as llms__ollama from "../llms/ollama.js";
  export * as llms__fireworks from "../llms/fireworks.js";
  export * as llms__yandex from "../llms/yandex.js";
+ export * as llms__fake from "../llms/fake.js";
  export * as prompts from "../prompts/index.js";
  export * as vectorstores__base from "../vectorstores/base.js";
  export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -36,10 +38,13 @@ export * as document_transformers__openai_functions from "../document_transforme
  export * as chat_models__base from "../chat_models/base.js";
  export * as chat_models__openai from "../chat_models/openai.js";
  export * as chat_models__anthropic from "../chat_models/anthropic.js";
+ export * as chat_models__cloudflare_workersai from "../chat_models/cloudflare_workersai.js";
  export * as chat_models__fireworks from "../chat_models/fireworks.js";
  export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
  export * as chat_models__ollama from "../chat_models/ollama.js";
  export * as chat_models__minimax from "../chat_models/minimax.js";
+ export * as chat_models__yandex from "../chat_models/yandex.js";
+ export * as chat_models__fake from "../chat_models/fake.js";
  export * as schema from "../schema/index.js";
  export * as schema__document from "../schema/document.js";
  export * as schema__output_parser from "../schema/output_parser.js";
@@ -77,5 +82,6 @@ export * as experimental__babyagi from "../experimental/babyagi/index.js";
  export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
  export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
  export * as experimental__chat_models__bittensor from "../experimental/chat_models/bittensor.js";
+ export * as experimental__chains__violation_of_expectations from "../experimental/chains/violation_of_expectations/index.js";
  export * as evaluation from "../evaluation/index.js";
  export * as runnables__remote from "../runnables/remote.js";
package/dist/retrievers/zep.cjs
@@ -47,8 +47,29 @@ class ZepRetriever extends retriever_js_1.BaseRetriever {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "searchType", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "mmrLambda", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "filter", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          this.sessionId = config.sessionId;
          this.topK = config.topK;
+         this.searchType = config.searchType;
+         this.mmrLambda = config.mmrLambda;
+         this.filter = config.filter;
          this.zepClientPromise = zep_js_1.ZepClient.init(config.url, config.apiKey);
      }
      /**
@@ -59,9 +80,9 @@ class ZepRetriever extends retriever_js_1.BaseRetriever {
      searchResultToDoc(results) {
          return results
              .filter((r) => r.message)
-             .map(({ message: { content } = {}, ...metadata }, dist) => new document_js_1.Document({
+             .map(({ message: { content, metadata: messageMetadata } = {}, dist, ...rest }) => new document_js_1.Document({
              pageContent: content ?? "",
-             metadata: { score: dist, ...metadata },
+             metadata: { score: dist, ...messageMetadata, ...rest },
          }));
      }
      /**
@@ -70,7 +91,12 @@ class ZepRetriever extends retriever_js_1.BaseRetriever {
       * @returns {Promise<Document[]>} A promise that resolves to an array of relevant Document objects.
       */
      async _getRelevantDocuments(query) {
-         const payload = { text: query, metadata: {} };
+         const payload = {
+             text: query,
+             metadata: this.filter,
+             search_type: this.searchType,
+             mmr_lambda: this.mmrLambda,
+         };
          // Wait for ZepClient to be initialized
          const zepClient = await this.zepClientPromise;
          if (!zepClient) {
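
The ZepRetriever changes (the `.d.ts` and ESM builds in the file list carry matching hunks) add `searchType`, `mmrLambda`, and `filter` to the retriever config and forward them in the search payload as `search_type`, `mmr_lambda`, and `metadata`. A sketch of the expanded configuration, with placeholder URL, session, and filter values:

```typescript
import { ZepRetriever } from "langchain/retrievers/zep";

const retriever = new ZepRetriever({
  url: "http://localhost:8000", // placeholder Zep server URL
  sessionId: "session-123", // placeholder session ID
  topK: 5,
  searchType: "mmr", // forwarded as search_type in the payload
  mmrLambda: 0.5, // forwarded as mmr_lambda
  filter: { label: "important" }, // placeholder; forwarded as metadata
});

const docs = await retriever.getRelevantDocuments("What did we decide?");
```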