langchain 0.0.167 → 0.0.168

This diff shows the changes between these two publicly released package versions as they appear in their public registry; it is provided for informational purposes only.
Files changed (58)
  1. package/README.md +4 -4
  2. package/chat_models/cloudflare_workersai.cjs +1 -0
  3. package/chat_models/cloudflare_workersai.d.ts +1 -0
  4. package/chat_models/cloudflare_workersai.js +1 -0
  5. package/chat_models/fake.cjs +1 -0
  6. package/chat_models/fake.d.ts +1 -0
  7. package/chat_models/fake.js +1 -0
  8. package/dist/agents/chat/index.cjs +3 -2
  9. package/dist/agents/chat/index.d.ts +3 -0
  10. package/dist/agents/chat/index.js +3 -2
  11. package/dist/chat_models/cloudflare_workersai.cjs +140 -0
  12. package/dist/chat_models/cloudflare_workersai.d.ts +46 -0
  13. package/dist/chat_models/cloudflare_workersai.js +136 -0
  14. package/dist/chat_models/fake.cjs +101 -0
  15. package/dist/chat_models/fake.d.ts +36 -0
  16. package/dist/chat_models/fake.js +97 -0
  17. package/dist/llms/cloudflare_workersai.cjs +117 -0
  18. package/dist/llms/cloudflare_workersai.d.ts +49 -0
  19. package/dist/llms/cloudflare_workersai.js +113 -0
  20. package/dist/llms/fake.cjs +82 -0
  21. package/dist/llms/fake.d.ts +31 -0
  22. package/dist/llms/fake.js +78 -0
  23. package/dist/llms/sagemaker_endpoint.cjs +9 -7
  24. package/dist/llms/sagemaker_endpoint.d.ts +3 -3
  25. package/dist/llms/sagemaker_endpoint.js +9 -7
  26. package/dist/load/import_constants.cjs +2 -0
  27. package/dist/load/import_constants.js +2 -0
  28. package/dist/load/import_map.cjs +6 -2
  29. package/dist/load/import_map.d.ts +4 -0
  30. package/dist/load/import_map.js +4 -0
  31. package/dist/util/axios-fetch-adapter.cjs +1 -1
  32. package/dist/util/axios-fetch-adapter.js +1 -1
  33. package/dist/util/env.cjs +1 -1
  34. package/dist/util/env.js +1 -1
  35. package/dist/util/event-source-parse.cjs +1 -1
  36. package/dist/util/event-source-parse.js +1 -1
  37. package/dist/vectorstores/closevector/common.cjs +128 -0
  38. package/dist/vectorstores/closevector/common.d.ts +82 -0
  39. package/dist/vectorstores/closevector/common.js +124 -0
  40. package/dist/vectorstores/closevector/node.cjs +109 -0
  41. package/dist/vectorstores/closevector/node.d.ts +83 -0
  42. package/dist/vectorstores/closevector/node.js +105 -0
  43. package/dist/vectorstores/closevector/web.cjs +109 -0
  44. package/dist/vectorstores/closevector/web.d.ts +80 -0
  45. package/dist/vectorstores/closevector/web.js +105 -0
  46. package/llms/cloudflare_workersai.cjs +1 -0
  47. package/llms/cloudflare_workersai.d.ts +1 -0
  48. package/llms/cloudflare_workersai.js +1 -0
  49. package/llms/fake.cjs +1 -0
  50. package/llms/fake.d.ts +1 -0
  51. package/llms/fake.js +1 -0
  52. package/package.json +68 -5
  53. package/vectorstores/closevector/node.cjs +1 -0
  54. package/vectorstores/closevector/node.d.ts +1 -0
  55. package/vectorstores/closevector/node.js +1 -0
  56. package/vectorstores/closevector/web.cjs +1 -0
  57. package/vectorstores/closevector/web.d.ts +1 -0
  58. package/vectorstores/closevector/web.js +1 -0

package/dist/llms/cloudflare_workersai.cjs
@@ -0,0 +1,117 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.CloudflareWorkersAI = void 0;
+ const base_js_1 = require("./base.cjs");
+ const env_js_1 = require("../util/env.cjs");
+ /**
+  * Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
+  * Language Model) class, providing a standard interface for interacting
+  * with the CloudflareWorkersAI language model.
+  */
+ class CloudflareWorkersAI extends base_js_1.LLM {
+     static lc_name() {
+         return "CloudflareWorkersAI";
+     }
+     constructor(fields) {
+         super(fields ?? {});
+         Object.defineProperty(this, "model", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "@cf/meta/llama-2-7b-chat-int8"
+         });
+         Object.defineProperty(this, "cloudflareAccountId", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "cloudflareApiToken", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "baseUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: true
+         });
+         this.model = fields?.model ?? this.model;
+         this.cloudflareAccountId =
+             fields?.cloudflareAccountId ??
+                 (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_ACCOUNT_ID");
+         this.cloudflareApiToken =
+             fields?.cloudflareApiToken ??
+                 (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_API_TOKEN");
+         this.baseUrl = fields?.baseUrl;
+     }
+     /**
+      * Method to validate the environment.
+      */
+     validateEnvironment() {
+         if (!this.cloudflareAccountId) {
+             throw new Error(`No Cloudflare account ID found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_ACCOUNT_ID" in your environment variables.`);
+         }
+         if (!this.cloudflareApiToken) {
+             throw new Error(`No Cloudflare API token found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_API_TOKEN" in your environment variables.`);
+         }
+     }
+     /** Get the identifying parameters for this LLM. */
+     get identifyingParams() {
+         return { model: this.model };
+     }
+     /**
+      * Get the parameters used to invoke the model
+      */
+     invocationParams() {
+         return {
+             model: this.model,
+         };
+     }
+     /** Get the type of LLM. */
+     _llmType() {
+         return "cloudflare";
+     }
+     /** Call out to the CloudflareWorkersAI completion endpoint.
+      Args:
+          prompt: The prompt to pass into the model.
+      Returns:
+          The string generated by the model.
+      Example:
+          const response = await model.call("Tell me a joke.");
+      */
+     async _call(prompt, options) {
+         this.validateEnvironment();
+         const url = `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run/${this.model}`;
+         const headers = {
+             Authorization: `Bearer ${this.cloudflareApiToken}`,
+             "Content-Type": "application/json",
+         };
+         const data = { prompt };
+         const responseData = await this.caller.call(async () => {
+             const response = await fetch(url, {
+                 method: "POST",
+                 headers,
+                 body: JSON.stringify(data),
+                 signal: options.signal,
+             });
+             if (!response.ok) {
+                 const error = new Error(`Cloudflare LLM call failed with status code ${response.status}`);
+                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                 error.response = response;
+                 throw error;
+             }
+             return response.json();
+         });
+         return responseData.result.response;
+     }
+ }
+ exports.CloudflareWorkersAI = CloudflareWorkersAI;

package/dist/llms/cloudflare_workersai.d.ts
@@ -0,0 +1,49 @@
+ import { LLM, BaseLLMParams } from "./base.js";
+ /**
+  * Interface for CloudflareWorkersAI input parameters.
+  */
+ export interface CloudflareWorkersAIInput {
+     cloudflareAccountId?: string;
+     cloudflareApiToken?: string;
+     model?: string;
+     baseUrl?: string;
+ }
+ /**
+  * Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
+  * Language Model) class, providing a standard interface for interacting
+  * with the CloudflareWorkersAI language model.
+  */
+ export declare class CloudflareWorkersAI extends LLM implements CloudflareWorkersAIInput {
+     model: string;
+     cloudflareAccountId?: string;
+     cloudflareApiToken?: string;
+     baseUrl?: string;
+     static lc_name(): string;
+     lc_serializable: boolean;
+     constructor(fields?: CloudflareWorkersAIInput & BaseLLMParams);
+     /**
+      * Method to validate the environment.
+      */
+     validateEnvironment(): void;
+     /** Get the identifying parameters for this LLM. */
+     get identifyingParams(): {
+         model: string;
+     };
+     /**
+      * Get the parameters used to invoke the model
+      */
+     invocationParams(): {
+         model: string;
+     };
+     /** Get the type of LLM. */
+     _llmType(): string;
+     /** Call out to the CloudflareWorkersAI completion endpoint.
+      Args:
+          prompt: The prompt to pass into the model.
+      Returns:
+          The string generated by the model.
+      Example:
+          const response = await model.call("Tell me a joke.");
+      */
+     _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+ }
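
The class is re-exported from the langchain/llms/cloudflare_workersai entrypoint (the one-line wrapper files in the list above). A minimal construction sketch against the interface above; when the credential fields are omitted they fall back to the CLOUDFLARE_ACCOUNT_ID and CLOUDFLARE_API_TOKEN environment variables, and the two literal values below are placeholders:

    import { CloudflareWorkersAI } from "langchain/llms/cloudflare_workersai";

    const model = new CloudflareWorkersAI({
      model: "@cf/meta/llama-2-7b-chat-int8", // the default model
      cloudflareAccountId: "my-account-id",   // placeholder; falls back to CLOUDFLARE_ACCOUNT_ID
      cloudflareApiToken: "my-api-token",     // placeholder; falls back to CLOUDFLARE_API_TOKEN
    });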

package/dist/llms/cloudflare_workersai.js
@@ -0,0 +1,113 @@
+ import { LLM } from "./base.js";
+ import { getEnvironmentVariable } from "../util/env.js";
+ /**
+  * Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
+  * Language Model) class, providing a standard interface for interacting
+  * with the CloudflareWorkersAI language model.
+  */
+ export class CloudflareWorkersAI extends LLM {
+     static lc_name() {
+         return "CloudflareWorkersAI";
+     }
+     constructor(fields) {
+         super(fields ?? {});
+         Object.defineProperty(this, "model", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "@cf/meta/llama-2-7b-chat-int8"
+         });
+         Object.defineProperty(this, "cloudflareAccountId", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "cloudflareApiToken", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "baseUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: true
+         });
+         this.model = fields?.model ?? this.model;
+         this.cloudflareAccountId =
+             fields?.cloudflareAccountId ??
+                 getEnvironmentVariable("CLOUDFLARE_ACCOUNT_ID");
+         this.cloudflareApiToken =
+             fields?.cloudflareApiToken ??
+                 getEnvironmentVariable("CLOUDFLARE_API_TOKEN");
+         this.baseUrl = fields?.baseUrl;
+     }
+     /**
+      * Method to validate the environment.
+      */
+     validateEnvironment() {
+         if (!this.cloudflareAccountId) {
+             throw new Error(`No Cloudflare account ID found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_ACCOUNT_ID" in your environment variables.`);
+         }
+         if (!this.cloudflareApiToken) {
+             throw new Error(`No Cloudflare API token found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_API_TOKEN" in your environment variables.`);
+         }
+     }
+     /** Get the identifying parameters for this LLM. */
+     get identifyingParams() {
+         return { model: this.model };
+     }
+     /**
+      * Get the parameters used to invoke the model
+      */
+     invocationParams() {
+         return {
+             model: this.model,
+         };
+     }
+     /** Get the type of LLM. */
+     _llmType() {
+         return "cloudflare";
+     }
+     /** Call out to the CloudflareWorkersAI completion endpoint.
+      Args:
+          prompt: The prompt to pass into the model.
+      Returns:
+          The string generated by the model.
+      Example:
+          const response = await model.call("Tell me a joke.");
+      */
+     async _call(prompt, options) {
+         this.validateEnvironment();
+         const url = `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run/${this.model}`;
+         const headers = {
+             Authorization: `Bearer ${this.cloudflareApiToken}`,
+             "Content-Type": "application/json",
+         };
+         const data = { prompt };
+         const responseData = await this.caller.call(async () => {
+             const response = await fetch(url, {
+                 method: "POST",
+                 headers,
+                 body: JSON.stringify(data),
+                 signal: options.signal,
+             });
+             if (!response.ok) {
+                 const error = new Error(`Cloudflare LLM call failed with status code ${response.status}`);
+                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                 error.response = response;
+                 throw error;
+             }
+             return response.json();
+         });
+         return responseData.result.response;
+     }
+ }
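
One observation from the compiled output: baseUrl is accepted and stored but never read in _call, which always builds its URL from the fixed api.cloudflare.com host, so overriding the base URL has no effect in this version. Invoking the model is a single POST whose JSON result.response field becomes the return value; a hedged invocation sketch, with credentials assumed to be set in the environment:

    import { CloudflareWorkersAI } from "langchain/llms/cloudflare_workersai";

    const model = new CloudflareWorkersAI(); // reads CLOUDFLARE_ACCOUNT_ID / CLOUDFLARE_API_TOKEN
    const response = await model.call("Tell me a joke.");
    console.log(response); // the string from result.response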

package/dist/llms/fake.cjs
@@ -0,0 +1,82 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.FakeListLLM = void 0;
+ const base_js_1 = require("./base.cjs");
+ const index_js_1 = require("../schema/index.cjs");
+ /**
+  * A fake LLM that returns a predefined list of responses. It can be used for
+  * testing purposes.
+  */
+ class FakeListLLM extends base_js_1.LLM {
+     static lc_name() {
+         return "FakeListLLM";
+     }
+     constructor({ responses, sleep }) {
+         super({});
+         Object.defineProperty(this, "responses", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "i", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 0
+         });
+         Object.defineProperty(this, "sleep", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.responses = responses;
+         this.sleep = sleep;
+     }
+     _llmType() {
+         return "fake-list";
+     }
+     async _call(_prompt, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         await this._sleepIfRequested();
+         return response;
+     }
+     _currentResponse() {
+         return this.responses[this.i];
+     }
+     _incrementResponse() {
+         if (this.i < this.responses.length - 1) {
+             this.i += 1;
+         }
+         else {
+             this.i = 0;
+         }
+     }
+     async *_streamResponseChunks(_input, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         for await (const text of response) {
+             await this._sleepIfRequested();
+             yield this._createResponseChunk(text);
+         }
+     }
+     async _sleepIfRequested() {
+         if (this.sleep !== undefined) {
+             await this._sleep();
+         }
+     }
+     async _sleep() {
+         return new Promise((resolve) => {
+             setTimeout(() => resolve(), this.sleep);
+         });
+     }
+     _createResponseChunk(text) {
+         return new index_js_1.GenerationChunk({
+             text,
+             generationInfo: {},
+         });
+     }
+ }
+ exports.FakeListLLM = FakeListLLM;
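
A minimal test sketch for the new fake LLM, using the langchain/llms/fake entrypoint from the file list above. Responses are returned in order, and _incrementResponse wraps back to the first response once the list is exhausted:

    import { FakeListLLM } from "langchain/llms/fake";

    const llm = new FakeListLLM({
      responses: ["first canned answer", "second canned answer"],
    });

    console.log(await llm.call("any prompt")); // "first canned answer"
    console.log(await llm.call("any prompt")); // "second canned answer"
    console.log(await llm.call("any prompt")); // wraps around to "first canned answer"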

package/dist/llms/fake.d.ts
@@ -0,0 +1,31 @@
+ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
+ import { LLM, BaseLLMParams } from "./base.js";
+ import { GenerationChunk } from "../schema/index.js";
+ /**
+  * Interface for the input parameters specific to the Fake List model.
+  */
+ export interface FakeListInput extends BaseLLMParams {
+     /** Responses to return */
+     responses: string[];
+     /** Time to sleep in milliseconds between responses */
+     sleep?: number;
+ }
+ /**
+  * A fake LLM that returns a predefined list of responses. It can be used for
+  * testing purposes.
+  */
+ export declare class FakeListLLM extends LLM {
+     static lc_name(): string;
+     responses: string[];
+     i: number;
+     sleep?: number;
+     constructor({ responses, sleep }: FakeListInput);
+     _llmType(): string;
+     _call(_prompt: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<string>;
+     _currentResponse(): string;
+     _incrementResponse(): void;
+     _streamResponseChunks(_input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
+     _sleepIfRequested(): Promise<void>;
+     _sleep(): Promise<void>;
+     _createResponseChunk(text: string): GenerationChunk;
+ }

package/dist/llms/fake.js
@@ -0,0 +1,78 @@
+ import { LLM } from "./base.js";
+ import { GenerationChunk } from "../schema/index.js";
+ /**
+  * A fake LLM that returns a predefined list of responses. It can be used for
+  * testing purposes.
+  */
+ export class FakeListLLM extends LLM {
+     static lc_name() {
+         return "FakeListLLM";
+     }
+     constructor({ responses, sleep }) {
+         super({});
+         Object.defineProperty(this, "responses", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "i", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 0
+         });
+         Object.defineProperty(this, "sleep", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.responses = responses;
+         this.sleep = sleep;
+     }
+     _llmType() {
+         return "fake-list";
+     }
+     async _call(_prompt, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         await this._sleepIfRequested();
+         return response;
+     }
+     _currentResponse() {
+         return this.responses[this.i];
+     }
+     _incrementResponse() {
+         if (this.i < this.responses.length - 1) {
+             this.i += 1;
+         }
+         else {
+             this.i = 0;
+         }
+     }
+     async *_streamResponseChunks(_input, _options, _runManager) {
+         const response = this._currentResponse();
+         this._incrementResponse();
+         for await (const text of response) {
+             await this._sleepIfRequested();
+             yield this._createResponseChunk(text);
+         }
+     }
+     async _sleepIfRequested() {
+         if (this.sleep !== undefined) {
+             await this._sleep();
+         }
+     }
+     async _sleep() {
+         return new Promise((resolve) => {
+             setTimeout(() => resolve(), this.sleep);
+         });
+     }
+     _createResponseChunk(text) {
+         return new GenerationChunk({
+             text,
+             generationInfo: {},
+         });
+     }
+ }
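
Because _streamResponseChunks iterates the selected response string, streaming yields one character per chunk, pausing for sleep milliseconds before each one when sleep is set. A hedged sketch, assuming the .stream() method LLMs expose in this release:

    import { FakeListLLM } from "langchain/llms/fake";

    const llm = new FakeListLLM({ responses: ["hello"], sleep: 10 });

    // Emits "h", "e", "l", "l", "o" as separate chunks, roughly 10 ms apart.
    for await (const chunk of await llm.stream("any prompt")) {
      process.stdout.write(chunk);
    }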

package/dist/llms/sagemaker_endpoint.cjs
@@ -131,18 +131,18 @@ class SageMakerEndpoint extends base_js_1.LLM {
       * Calls the SageMaker endpoint and retrieves the result.
       * @param {string} prompt The input prompt.
       * @param {this["ParsedCallOptions"]} options Parsed call options.
-      * @param {CallbackManagerForLLMRun} _runManager Optional run manager.
+      * @param {CallbackManagerForLLMRun} runManager Optional run manager.
       * @returns {Promise<string>} A promise that resolves to the generated string.
       */
      /** @ignore */
-     async _call(prompt, options, _runManager) {
+     async _call(prompt, options, runManager) {
          return this.streaming
-             ? await this.streamingCall(prompt, options)
+             ? await this.streamingCall(prompt, options, runManager)
              : await this.noStreamingCall(prompt, options);
      }
-     async streamingCall(prompt, options) {
+     async streamingCall(prompt, options, runManager) {
          const chunks = [];
-         for await (const chunk of this._streamResponseChunks(prompt, options)) {
+         for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
              chunks.push(chunk.text);
          }
          return chunks.join("");
@@ -168,7 +168,7 @@ class SageMakerEndpoint extends base_js_1.LLM {
       * @param {this["ParsedCallOptions"]} options Parsed call options.
       * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
       */
-     async *_streamResponseChunks(prompt, options) {
+     async *_streamResponseChunks(prompt, options, runManager) {
          const body = await this.contentHandler.transformInput(prompt, this.modelKwargs ?? {});
          const { contentType, accepts } = this.contentHandler;
          const stream = await this.caller.call(() => this.client.send(new client_sagemaker_runtime_1.InvokeEndpointWithResponseStreamCommand({
@@ -183,13 +183,15 @@ class SageMakerEndpoint extends base_js_1.LLM {
      }
      for await (const chunk of stream.Body) {
          if (chunk.PayloadPart && chunk.PayloadPart.Bytes) {
+             const text = await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes);
              yield new index_js_1.GenerationChunk({
-                 text: await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes),
+                 text,
                  generationInfo: {
                      ...chunk,
                      response: undefined,
                  },
              });
+             await runManager?.handleLLMNewToken(text);
          }
          else if (chunk.InternalStreamFailure) {
              throw new Error(chunk.InternalStreamFailure.message);

package/dist/llms/sagemaker_endpoint.d.ts
@@ -104,11 +104,11 @@ export declare class SageMakerEndpoint extends LLM<BaseLLMCallOptions> {
       * Calls the SageMaker endpoint and retrieves the result.
       * @param {string} prompt The input prompt.
       * @param {this["ParsedCallOptions"]} options Parsed call options.
-      * @param {CallbackManagerForLLMRun} _runManager Optional run manager.
+      * @param {CallbackManagerForLLMRun} runManager Optional run manager.
       * @returns {Promise<string>} A promise that resolves to the generated string.
       */
      /** @ignore */
-     _call(prompt: string, options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): Promise<string>;
+     _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
      private streamingCall;
      private noStreamingCall;
      /**
@@ -117,5 +117,5 @@ export declare class SageMakerEndpoint extends LLM<BaseLLMCallOptions> {
       * @param {this["ParsedCallOptions"]} options Parsed call options.
       * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
       */
-     _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"]): AsyncGenerator<GenerationChunk>;
+     _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
  }

package/dist/llms/sagemaker_endpoint.js
@@ -127,18 +127,18 @@ export class SageMakerEndpoint extends LLM {
       * Calls the SageMaker endpoint and retrieves the result.
       * @param {string} prompt The input prompt.
       * @param {this["ParsedCallOptions"]} options Parsed call options.
-      * @param {CallbackManagerForLLMRun} _runManager Optional run manager.
+      * @param {CallbackManagerForLLMRun} runManager Optional run manager.
       * @returns {Promise<string>} A promise that resolves to the generated string.
       */
      /** @ignore */
-     async _call(prompt, options, _runManager) {
+     async _call(prompt, options, runManager) {
          return this.streaming
-             ? await this.streamingCall(prompt, options)
+             ? await this.streamingCall(prompt, options, runManager)
              : await this.noStreamingCall(prompt, options);
      }
-     async streamingCall(prompt, options) {
+     async streamingCall(prompt, options, runManager) {
          const chunks = [];
-         for await (const chunk of this._streamResponseChunks(prompt, options)) {
+         for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
              chunks.push(chunk.text);
          }
          return chunks.join("");
@@ -164,7 +164,7 @@ export class SageMakerEndpoint extends LLM {
       * @param {this["ParsedCallOptions"]} options Parsed call options.
       * @returns {AsyncGenerator<GenerationChunk>} An asynchronous generator yielding generation chunks.
       */
-     async *_streamResponseChunks(prompt, options) {
+     async *_streamResponseChunks(prompt, options, runManager) {
          const body = await this.contentHandler.transformInput(prompt, this.modelKwargs ?? {});
          const { contentType, accepts } = this.contentHandler;
          const stream = await this.caller.call(() => this.client.send(new InvokeEndpointWithResponseStreamCommand({
@@ -179,13 +179,15 @@ export class SageMakerEndpoint extends LLM {
      }
      for await (const chunk of stream.Body) {
          if (chunk.PayloadPart && chunk.PayloadPart.Bytes) {
+             const text = await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes);
              yield new GenerationChunk({
-                 text: await this.contentHandler.transformOutput(chunk.PayloadPart.Bytes),
+                 text,
                  generationInfo: {
                      ...chunk,
                      response: undefined,
                  },
              });
+             await runManager?.handleLLMNewToken(text);
          }
          else if (chunk.InternalStreamFailure) {
              throw new Error(chunk.InternalStreamFailure.message);
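
The practical effect of the SageMaker change: when streaming is enabled, each decoded chunk is now forwarded to the run manager's handleLLMNewToken, so token callbacks fire as the response streams instead of only after the final string is assembled. A hedged sketch; the endpoint name, region, and content handler below are placeholders:

    import { SageMakerEndpoint } from "langchain/llms/sagemaker_endpoint";

    declare const contentHandler: any; // your content handler implementation

    const llm = new SageMakerEndpoint({
      endpointName: "my-endpoint",            // placeholder
      streaming: true,
      contentHandler,
      clientOptions: { region: "us-east-1" }, // placeholder AWS client config
    });

    // Tokens now arrive incrementally through the callback handler.
    await llm.call("Tell me a story.", {
      callbacks: [{ handleLLMNewToken: (token: string) => process.stdout.write(token) }],
    });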

package/dist/load/import_constants.cjs
@@ -43,6 +43,8 @@ exports.optionalImportEntrypoints = [
      "langchain/vectorstores/cassandra",
      "langchain/vectorstores/elasticsearch",
      "langchain/vectorstores/cloudflare_vectorize",
+     "langchain/vectorstores/closevector/web",
+     "langchain/vectorstores/closevector/node",
      "langchain/vectorstores/chroma",
      "langchain/vectorstores/googlevertexai",
      "langchain/vectorstores/hnswlib",

package/dist/load/import_constants.js
@@ -40,6 +40,8 @@ export const optionalImportEntrypoints = [
      "langchain/vectorstores/cassandra",
      "langchain/vectorstores/elasticsearch",
      "langchain/vectorstores/cloudflare_vectorize",
+     "langchain/vectorstores/closevector/web",
+     "langchain/vectorstores/closevector/node",
      "langchain/vectorstores/chroma",
      "langchain/vectorstores/googlevertexai",
      "langchain/vectorstores/hnswlib",

package/dist/load/import_map.cjs
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
      return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.runnables__remote = exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = void 0;
+ exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+ exports.runnables__remote = exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -43,9 +43,11 @@ exports.llms__base = __importStar(require("../llms/base.cjs"));
  exports.llms__openai = __importStar(require("../llms/openai.cjs"));
  exports.llms__ai21 = __importStar(require("../llms/ai21.cjs"));
  exports.llms__aleph_alpha = __importStar(require("../llms/aleph_alpha.cjs"));
+ exports.llms__cloudflare_workersai = __importStar(require("../llms/cloudflare_workersai.cjs"));
  exports.llms__ollama = __importStar(require("../llms/ollama.cjs"));
  exports.llms__fireworks = __importStar(require("../llms/fireworks.cjs"));
  exports.llms__yandex = __importStar(require("../llms/yandex.cjs"));
+ exports.llms__fake = __importStar(require("../llms/fake.cjs"));
  exports.prompts = __importStar(require("../prompts/index.cjs"));
  exports.vectorstores__base = __importStar(require("../vectorstores/base.cjs"));
  exports.vectorstores__memory = __importStar(require("../vectorstores/memory.cjs"));
@@ -63,10 +65,12 @@ exports.document_transformers__openai_functions = __importStar(require("../docum
  exports.chat_models__base = __importStar(require("../chat_models/base.cjs"));
  exports.chat_models__openai = __importStar(require("../chat_models/openai.cjs"));
  exports.chat_models__anthropic = __importStar(require("../chat_models/anthropic.cjs"));
+ exports.chat_models__cloudflare_workersai = __importStar(require("../chat_models/cloudflare_workersai.cjs"));
  exports.chat_models__fireworks = __importStar(require("../chat_models/fireworks.cjs"));
  exports.chat_models__baiduwenxin = __importStar(require("../chat_models/baiduwenxin.cjs"));
  exports.chat_models__ollama = __importStar(require("../chat_models/ollama.cjs"));
  exports.chat_models__minimax = __importStar(require("../chat_models/minimax.cjs"));
+ exports.chat_models__fake = __importStar(require("../chat_models/fake.cjs"));
  exports.schema = __importStar(require("../schema/index.cjs"));
  exports.schema__document = __importStar(require("../schema/document.cjs"));
  exports.schema__output_parser = __importStar(require("../schema/output_parser.cjs"));
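
This import map is what the langchain/load entrypoint consults when deserializing, so registering llms/cloudflare_workersai here, together with lc_serializable being set on the class, is what makes a serialization round trip possible. A hedged sketch assuming the load() helper's signature:

    import { load } from "langchain/load";
    import { CloudflareWorkersAI } from "langchain/llms/cloudflare_workersai";

    const model = new CloudflareWorkersAI({ model: "@cf/meta/llama-2-7b-chat-int8" });
    const serialized = JSON.stringify(model); // Serializable classes emit LangChain JSON via toJSON()
    const revived = await load<CloudflareWorkersAI>(serialized);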