langchain 0.0.154 → 0.0.156

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/chat_models/bedrock.cjs +1 -0
  2. package/chat_models/bedrock.d.ts +1 -0
  3. package/chat_models/bedrock.js +1 -0
  4. package/dist/callbacks/base.d.ts +42 -28
  5. package/dist/callbacks/handlers/log_stream.cjs +283 -0
  6. package/dist/callbacks/handlers/log_stream.d.ts +99 -0
  7. package/dist/callbacks/handlers/log_stream.js +277 -0
  8. package/dist/callbacks/handlers/tracer.cjs +34 -18
  9. package/dist/callbacks/handlers/tracer.d.ts +18 -16
  10. package/dist/callbacks/handlers/tracer.js +34 -18
  11. package/dist/chat_models/bedrock.cjs +260 -0
  12. package/dist/chat_models/bedrock.d.ts +58 -0
  13. package/dist/chat_models/bedrock.js +254 -0
  14. package/dist/document_loaders/web/notionapi.cjs +8 -4
  15. package/dist/document_loaders/web/notionapi.js +8 -4
  16. package/dist/document_loaders/web/searchapi.cjs +134 -0
  17. package/dist/document_loaders/web/searchapi.d.ts +65 -0
  18. package/dist/document_loaders/web/searchapi.js +130 -0
  19. package/dist/embeddings/cloudflare_workersai.cjs +69 -0
  20. package/dist/embeddings/cloudflare_workersai.d.ts +28 -0
  21. package/dist/embeddings/cloudflare_workersai.js +65 -0
  22. package/dist/llms/bedrock.cjs +57 -67
  23. package/dist/llms/bedrock.d.ts +8 -35
  24. package/dist/llms/bedrock.js +57 -67
  25. package/dist/load/import_constants.cjs +4 -0
  26. package/dist/load/import_constants.js +4 -0
  27. package/dist/load/import_map.cjs +3 -2
  28. package/dist/load/import_map.d.ts +1 -0
  29. package/dist/load/import_map.js +1 -0
  30. package/dist/schema/runnable/base.cjs +64 -5
  31. package/dist/schema/runnable/base.d.ts +13 -0
  32. package/dist/schema/runnable/base.js +64 -5
  33. package/dist/tools/index.cjs +3 -1
  34. package/dist/tools/index.d.ts +1 -0
  35. package/dist/tools/index.js +1 -0
  36. package/dist/tools/searchapi.cjs +139 -0
  37. package/dist/tools/searchapi.d.ts +64 -0
  38. package/dist/tools/searchapi.js +135 -0
  39. package/dist/util/bedrock.cjs +54 -0
  40. package/dist/util/bedrock.d.ts +59 -0
  41. package/dist/util/bedrock.js +50 -0
  42. package/dist/util/fast-json-patch/index.cjs +48 -0
  43. package/dist/util/fast-json-patch/index.d.ts +21 -0
  44. package/dist/util/fast-json-patch/index.js +15 -0
  45. package/dist/util/fast-json-patch/src/core.cjs +469 -0
  46. package/dist/util/fast-json-patch/src/core.d.ts +111 -0
  47. package/dist/util/fast-json-patch/src/core.js +459 -0
  48. package/dist/util/fast-json-patch/src/helpers.cjs +194 -0
  49. package/dist/util/fast-json-patch/src/helpers.d.ts +36 -0
  50. package/dist/util/fast-json-patch/src/helpers.js +181 -0
  51. package/dist/util/googlevertexai-webauth.cjs +6 -2
  52. package/dist/util/googlevertexai-webauth.d.ts +1 -0
  53. package/dist/util/googlevertexai-webauth.js +6 -2
  54. package/dist/util/stream.cjs +2 -40
  55. package/dist/util/stream.d.ts +1 -2
  56. package/dist/util/stream.js +1 -38
  57. package/dist/vectorstores/cloudflare_vectorize.cjs +200 -0
  58. package/dist/vectorstores/cloudflare_vectorize.d.ts +90 -0
  59. package/dist/vectorstores/cloudflare_vectorize.js +173 -0
  60. package/dist/vectorstores/pgvector.cjs +1 -1
  61. package/dist/vectorstores/pgvector.js +1 -1
  62. package/dist/vectorstores/supabase.d.ts +1 -1
  63. package/dist/vectorstores/vercel_postgres.cjs +300 -0
  64. package/dist/vectorstores/vercel_postgres.d.ts +145 -0
  65. package/dist/vectorstores/vercel_postgres.js +296 -0
  66. package/document_loaders/web/searchapi.cjs +1 -0
  67. package/document_loaders/web/searchapi.d.ts +1 -0
  68. package/document_loaders/web/searchapi.js +1 -0
  69. package/embeddings/cloudflare_workersai.cjs +1 -0
  70. package/embeddings/cloudflare_workersai.d.ts +1 -0
  71. package/embeddings/cloudflare_workersai.js +1 -0
  72. package/package.json +60 -14
  73. package/vectorstores/cloudflare_vectorize.cjs +1 -0
  74. package/vectorstores/cloudflare_vectorize.d.ts +1 -0
  75. package/vectorstores/cloudflare_vectorize.js +1 -0
  76. package/vectorstores/vercel_postgres.cjs +1 -0
  77. package/vectorstores/vercel_postgres.d.ts +1 -0
  78. package/vectorstores/vercel_postgres.js +1 -0
@@ -0,0 +1,69 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.CloudflareWorkersAIEmbeddings = void 0;
const ai_1 = require("@cloudflare/ai");
const chunk_js_1 = require("../util/chunk.cjs");
const base_js_1 = require("./base.cjs");
/**
 * Embeddings implementation that runs a text-embedding model through a
 * Cloudflare Workers AI binding. Documents are chunked into batches of
 * `batchSize` texts and embedded via parallel requests.
 */
class CloudflareWorkersAIEmbeddings extends base_js_1.Embeddings {
    /**
     * @param {object} fields - Must include `binding` (the Workers AI service
     *   binding, e.g. `env.AI`); may include `modelName`, `batchSize`, and
     *   `stripNewLines` overrides.
     * @throws {Error} If no Workers AI binding is supplied.
     */
    constructor(fields) {
        super(fields);
        // Model to run; defaults to Cloudflare's BGE base English model.
        Object.defineProperty(this, "modelName", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "@cf/baai/bge-base-en-v1.5"
        });
        // Maximum number of texts sent in one AI.run() call.
        Object.defineProperty(this, "batchSize", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: 50
        });
        // Replace newlines with spaces before embedding when true.
        Object.defineProperty(this, "stripNewLines", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: true
        });
        Object.defineProperty(this, "ai", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        if (!fields.binding) {
            throw new Error("Must supply a Workers AI binding, eg { binding: env.AI }");
        }
        this.ai = new ai_1.Ai(fields.binding);
        this.modelName = fields.modelName ?? this.modelName;
        // Bug fix: `fields.batchSize` was previously ignored even though it is
        // declared on CloudflareWorkersAIEmbeddingsParams.
        this.batchSize = fields.batchSize ?? this.batchSize;
        this.stripNewLines = fields.stripNewLines ?? this.stripNewLines;
    }
    /**
     * Embeds a list of documents, batching the requests and flattening the
     * per-batch results into one array of embedding vectors.
     * @param {string[]} texts - The documents to embed.
     * @returns {Promise<number[][]>} One embedding vector per input document,
     *   in input order.
     */
    async embedDocuments(texts) {
        const batches = (0, chunk_js_1.chunkArray)(this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts, this.batchSize);
        const batchRequests = batches.map((batch) => this.runEmbedding(batch));
        const batchResponses = await Promise.all(batchRequests);
        // Flatten [batch][doc] -> [doc]; order is preserved because
        // Promise.all keeps the batch order.
        return batchResponses.flat();
    }
    /**
     * Embeds a single query string.
     * @param {string} text - The query to embed.
     * @returns {Promise<number[]>} The query's embedding vector.
     */
    async embedQuery(text) {
        const data = await this.runEmbedding([
            this.stripNewLines ? text.replace(/\n/g, " ") : text,
        ]);
        return data[0];
    }
    // Runs the embedding model on one batch of texts through the base
    // class's caller (which provides retry/concurrency handling).
    async runEmbedding(texts) {
        return this.caller.call(async () => {
            const response = await this.ai.run(this.modelName, {
                text: texts,
            });
            return response.data;
        });
    }
}
exports.CloudflareWorkersAIEmbeddings = CloudflareWorkersAIEmbeddings;
@@ -0,0 +1,28 @@
1
import { Ai } from "@cloudflare/ai";
import { Fetcher } from "@cloudflare/workers-types";
import { Embeddings, EmbeddingsParams } from "./base.js";
/**
 * Configuration options for {@link CloudflareWorkersAIEmbeddings}.
 */
export interface CloudflareWorkersAIEmbeddingsParams extends EmbeddingsParams {
    /** The Workers AI service binding to run the model with, e.g. `env.AI`. */
    binding: Fetcher;
    /** Model name to use. Defaults to `"@cf/baai/bge-base-en-v1.5"`. */
    modelName?: string;
    /**
     * The maximum number of documents to embed in a single request.
     * Defaults to 50.
     */
    batchSize?: number;
    /**
     * Whether to strip new lines from the input text. This is recommended by
     * OpenAI, but may not be suitable for all use cases.
     */
    stripNewLines?: boolean;
}
/**
 * Embeddings class that uses a Cloudflare Workers AI binding to embed
 * queries and documents.
 */
export declare class CloudflareWorkersAIEmbeddings extends Embeddings {
    modelName: string;
    batchSize: number;
    stripNewLines: boolean;
    ai: Ai;
    constructor(fields: CloudflareWorkersAIEmbeddingsParams);
    embedDocuments(texts: string[]): Promise<number[][]>;
    embedQuery(text: string): Promise<number[]>;
    private runEmbedding;
}
@@ -0,0 +1,65 @@
1
import { Ai } from "@cloudflare/ai";
import { chunkArray } from "../util/chunk.js";
import { Embeddings } from "./base.js";
/**
 * Embeddings implementation that runs a text-embedding model through a
 * Cloudflare Workers AI binding. Documents are chunked into batches of
 * `batchSize` texts and embedded via parallel requests.
 */
export class CloudflareWorkersAIEmbeddings extends Embeddings {
    /**
     * @param {object} fields - Must include `binding` (the Workers AI service
     *   binding, e.g. `env.AI`); may include `modelName`, `batchSize`, and
     *   `stripNewLines` overrides.
     * @throws {Error} If no Workers AI binding is supplied.
     */
    constructor(fields) {
        super(fields);
        // Model to run; defaults to Cloudflare's BGE base English model.
        Object.defineProperty(this, "modelName", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "@cf/baai/bge-base-en-v1.5"
        });
        // Maximum number of texts sent in one AI.run() call.
        Object.defineProperty(this, "batchSize", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: 50
        });
        // Replace newlines with spaces before embedding when true.
        Object.defineProperty(this, "stripNewLines", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: true
        });
        Object.defineProperty(this, "ai", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        if (!fields.binding) {
            throw new Error("Must supply a Workers AI binding, eg { binding: env.AI }");
        }
        this.ai = new Ai(fields.binding);
        this.modelName = fields.modelName ?? this.modelName;
        // Bug fix: `fields.batchSize` was previously ignored even though it is
        // declared on CloudflareWorkersAIEmbeddingsParams.
        this.batchSize = fields.batchSize ?? this.batchSize;
        this.stripNewLines = fields.stripNewLines ?? this.stripNewLines;
    }
    /**
     * Embeds a list of documents, batching the requests and flattening the
     * per-batch results into one array of embedding vectors.
     * @param {string[]} texts - The documents to embed.
     * @returns {Promise<number[][]>} One embedding vector per input document,
     *   in input order.
     */
    async embedDocuments(texts) {
        const batches = chunkArray(this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts, this.batchSize);
        const batchRequests = batches.map((batch) => this.runEmbedding(batch));
        const batchResponses = await Promise.all(batchRequests);
        // Flatten [batch][doc] -> [doc]; order is preserved because
        // Promise.all keeps the batch order.
        return batchResponses.flat();
    }
    /**
     * Embeds a single query string.
     * @param {string} text - The query to embed.
     * @returns {Promise<number[]>} The query's embedding vector.
     */
    async embedQuery(text) {
        const data = await this.runEmbedding([
            this.stripNewLines ? text.replace(/\n/g, " ") : text,
        ]);
        return data[0];
    }
    // Runs the embedding model on one batch of texts through the base
    // class's caller (which provides retry/concurrency handling).
    async runEmbedding(texts) {
        return this.caller.call(async () => {
            const response = await this.ai.run(this.modelName, {
                text: texts,
            });
            return response.data;
        });
    }
}
@@ -1,67 +1,20 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.Bedrock = void 0;
4
- const signature_v4_1 = require("@aws-sdk/signature-v4");
4
+ const signature_v4_1 = require("@smithy/signature-v4");
5
5
  const credential_provider_node_1 = require("@aws-sdk/credential-provider-node");
6
- const protocol_http_1 = require("@aws-sdk/protocol-http");
6
+ const protocol_http_1 = require("@smithy/protocol-http");
7
7
  const eventstream_codec_1 = require("@smithy/eventstream-codec");
8
8
  const util_utf8_1 = require("@smithy/util-utf8");
9
9
  const sha256_js_1 = require("@aws-crypto/sha256-js");
10
+ const bedrock_js_1 = require("../util/bedrock.cjs");
10
11
  const env_js_1 = require("../util/env.cjs");
11
12
  const base_js_1 = require("./base.cjs");
12
13
  const index_js_1 = require("../schema/index.cjs");
13
- /**
14
- * A helper class used within the `Bedrock` class. It is responsible for
15
- * preparing the input and output for the Bedrock service. It formats the
16
- * input prompt based on the provider (e.g., "anthropic", "ai21",
17
- * "amazon") and extracts the generated text from the service response.
18
- */
19
- class BedrockLLMInputOutputAdapter {
20
- /** Adapter class to prepare the inputs from Langchain to a format
21
- that LLM model expects. Also, provides a helper function to extract
22
- the generated text from the model response. */
23
- static prepareInput(provider, prompt, maxTokens = 50, temperature = 0) {
24
- const inputBody = {};
25
- if (provider === "anthropic") {
26
- inputBody.prompt = prompt;
27
- inputBody.max_tokens_to_sample = maxTokens;
28
- inputBody.temperature = temperature;
29
- }
30
- else if (provider === "ai21") {
31
- inputBody.prompt = prompt;
32
- inputBody.maxTokens = maxTokens;
33
- inputBody.temperature = temperature;
34
- }
35
- else if (provider === "amazon") {
36
- inputBody.inputText = prompt;
37
- inputBody.textGenerationConfig = {
38
- maxTokenCount: maxTokens,
39
- temperature,
40
- };
41
- }
42
- return inputBody;
43
- }
44
- /**
45
- * Extracts the generated text from the service response.
46
- * @param provider The provider name.
47
- * @param responseBody The response body from the service.
48
- * @returns The generated text.
49
- */
50
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
51
- static prepareOutput(provider, responseBody) {
52
- if (provider === "anthropic") {
53
- return responseBody.completion;
54
- }
55
- else if (provider === "ai21") {
56
- return responseBody.data.text;
57
- }
58
- return responseBody.outputText;
59
- }
60
- }
61
14
  /**
62
15
  * A type of Large Language Model (LLM) that interacts with the Bedrock
63
16
  * service. It extends the base `LLM` class and implements the
64
- * `BedrockInput` interface. The class is designed to authenticate and
17
+ * `BaseBedrockInput` interface. The class is designed to authenticate and
65
18
  * interact with the Bedrock service, which is a part of Amazon Web
66
19
  * Services (AWS). It uses AWS credentials for authentication and can be
67
20
  * configured with various parameters such as the model to use, the AWS
@@ -74,6 +27,9 @@ class Bedrock extends base_js_1.LLM {
74
27
  _llmType() {
75
28
  return "bedrock";
76
29
  }
30
+ static lc_name() {
31
+ return "Bedrock";
32
+ }
77
33
  constructor(fields) {
78
34
  super(fields ?? {});
79
35
  Object.defineProperty(this, "model", {
@@ -112,7 +68,19 @@ class Bedrock extends base_js_1.LLM {
112
68
  writable: true,
113
69
  value: void 0
114
70
  });
115
- Object.defineProperty(this, "endpointUrl", {
71
+ Object.defineProperty(this, "endpointHost", {
72
+ enumerable: true,
73
+ configurable: true,
74
+ writable: true,
75
+ value: void 0
76
+ });
77
+ Object.defineProperty(this, "stopSequences", {
78
+ enumerable: true,
79
+ configurable: true,
80
+ writable: true,
81
+ value: void 0
82
+ });
83
+ Object.defineProperty(this, "modelKwargs", {
116
84
  enumerable: true,
117
85
  configurable: true,
118
86
  writable: true,
@@ -138,7 +106,9 @@ class Bedrock extends base_js_1.LLM {
138
106
  this.temperature = fields?.temperature ?? this.temperature;
139
107
  this.maxTokens = fields?.maxTokens ?? this.maxTokens;
140
108
  this.fetchFn = fields?.fetchFn ?? fetch;
141
- this.endpointUrl = fields?.endpointUrl;
109
+ this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
110
+ this.stopSequences = fields?.stopSequences;
111
+ this.modelKwargs = fields?.modelKwargs;
142
112
  }
143
113
  /** Call out to Bedrock service model.
144
114
  Arguments:
@@ -159,10 +129,11 @@ class Bedrock extends base_js_1.LLM {
159
129
  }
160
130
  async *_streamResponseChunks(prompt, options, runManager) {
161
131
  const provider = this.model.split(".")[0];
162
- const service = "bedrock";
163
- const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature);
164
- const endpointUrl = this.endpointUrl ?? `${service}.${this.region}.amazonaws.com`;
165
- const url = new URL(`https://${endpointUrl}/model/${this.model}/invoke-with-response-stream`);
132
+ const service = "bedrock-runtime";
133
+ const inputBody = bedrock_js_1.BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature, this.stopSequences, this.modelKwargs);
134
+ const endpointHost = this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
135
+ const amazonMethod = provider === "anthropic" ? "invoke-with-response-stream" : "invoke";
136
+ const url = new URL(`https://${endpointHost}/model/${this.model}/${amazonMethod}`);
166
137
  const request = new protocol_http_1.HttpRequest({
167
138
  hostname: url.hostname,
168
139
  path: url.pathname,
@@ -174,12 +145,12 @@ class Bedrock extends base_js_1.LLM {
174
145
  // host is required by AWS Signature V4: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
175
146
  host: url.host,
176
147
  accept: "application/json",
177
- "Content-Type": "application/json",
148
+ "content-type": "application/json",
178
149
  },
179
150
  });
180
151
  const signer = new signature_v4_1.SignatureV4({
181
152
  credentials: this.credentials,
182
- service,
153
+ service: "bedrock",
183
154
  region: this.region,
184
155
  sha256: sha256_js_1.Sha256,
185
156
  });
@@ -193,15 +164,34 @@ class Bedrock extends base_js_1.LLM {
193
164
  if (response.status < 200 || response.status >= 300) {
194
165
  throw Error(`Failed to access underlying url '${url}': got ${response.status} ${response.statusText}: ${await response.text()}`);
195
166
  }
196
- const reader = response.body?.getReader();
197
- for await (const chunk of this._readChunks(reader)) {
198
- const event = this.codec.decode(chunk);
199
- if (event.headers[":event-type"].value !== "chunk" ||
200
- event.headers[":content-type"].value !== "application/json") {
201
- throw Error(`Failed to get event chunk: got ${chunk}`);
167
+ if (provider === "anthropic") {
168
+ const reader = response.body?.getReader();
169
+ const decoder = new TextDecoder();
170
+ for await (const chunk of this._readChunks(reader)) {
171
+ const event = this.codec.decode(chunk);
172
+ if ((event.headers[":event-type"] !== undefined &&
173
+ event.headers[":event-type"].value !== "chunk") ||
174
+ event.headers[":content-type"].value !== "application/json") {
175
+ throw Error(`Failed to get event chunk: got ${chunk}`);
176
+ }
177
+ const body = JSON.parse(decoder.decode(event.body));
178
+ if (body.message) {
179
+ throw new Error(body.message);
180
+ }
181
+ if (body.bytes !== undefined) {
182
+ const chunkResult = JSON.parse(Buffer.from(body.bytes, "base64").toString());
183
+ const text = bedrock_js_1.BedrockLLMInputOutputAdapter.prepareOutput(provider, chunkResult);
184
+ yield new index_js_1.GenerationChunk({
185
+ text,
186
+ generationInfo: {},
187
+ });
188
+ await runManager?.handleLLMNewToken(text);
189
+ }
202
190
  }
203
- const body = JSON.parse(Buffer.from(JSON.parse(new TextDecoder("utf-8").decode(event.body)).bytes, "base64").toString());
204
- const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, body);
191
+ }
192
+ else {
193
+ const json = await response.json();
194
+ const text = bedrock_js_1.BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
205
195
  yield new index_js_1.GenerationChunk({
206
196
  text,
207
197
  generationInfo: {},
@@ -1,60 +1,34 @@
1
1
  import { EventStreamCodec } from "@smithy/eventstream-codec";
2
- import type { AwsCredentialIdentity, Provider } from "@aws-sdk/types";
2
+ import { BaseBedrockInput, type CredentialType } from "../util/bedrock.js";
3
3
  import { LLM, BaseLLMParams } from "./base.js";
4
4
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
5
5
  import { GenerationChunk } from "../schema/index.js";
6
- type CredentialType = AwsCredentialIdentity | Provider<AwsCredentialIdentity>;
7
- /** Bedrock models.
8
- To authenticate, the AWS client uses the following methods to automatically load credentials:
9
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
10
- If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used.
11
- Make sure the credentials / roles used have the required policies to access the Bedrock service.
12
- */
13
- export interface BedrockInput {
14
- /** Model to use.
15
- For example, "amazon.titan-tg1-large", this is equivalent to the modelId property in the list-foundation-models api.
16
- */
17
- model: string;
18
- /** The AWS region e.g. `us-west-2`.
19
- Fallback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here.
20
- */
21
- region?: string;
22
- /** AWS Credentials.
23
- If no credentials are provided, the default credentials from `@aws-sdk/credential-provider-node` will be used.
24
- */
25
- credentials?: CredentialType;
26
- /** Temperature */
27
- temperature?: number;
28
- /** Max tokens */
29
- maxTokens?: number;
30
- /** A custom fetch function for low-level access to AWS API. Defaults to fetch() */
31
- fetchFn?: typeof fetch;
32
- /** Override the default endpoint url */
33
- endpointUrl?: string;
34
- }
35
6
  /**
36
7
  * A type of Large Language Model (LLM) that interacts with the Bedrock
37
8
  * service. It extends the base `LLM` class and implements the
38
- * `BedrockInput` interface. The class is designed to authenticate and
9
+ * `BaseBedrockInput` interface. The class is designed to authenticate and
39
10
  * interact with the Bedrock service, which is a part of Amazon Web
40
11
  * Services (AWS). It uses AWS credentials for authentication and can be
41
12
  * configured with various parameters such as the model to use, the AWS
42
13
  * region, and the maximum number of tokens to generate.
43
14
  */
44
- export declare class Bedrock extends LLM implements BedrockInput {
15
+ export declare class Bedrock extends LLM implements BaseBedrockInput {
45
16
  model: string;
46
17
  region: string;
47
18
  credentials: CredentialType;
48
19
  temperature?: number | undefined;
49
20
  maxTokens?: number | undefined;
50
21
  fetchFn: typeof fetch;
51
- endpointUrl?: string;
22
+ endpointHost?: string;
23
+ stopSequences?: string[];
24
+ modelKwargs?: Record<string, unknown>;
52
25
  codec: EventStreamCodec;
53
26
  get lc_secrets(): {
54
27
  [key: string]: string;
55
28
  } | undefined;
56
29
  _llmType(): string;
57
- constructor(fields?: Partial<BedrockInput> & BaseLLMParams);
30
+ static lc_name(): string;
31
+ constructor(fields?: Partial<BaseBedrockInput> & BaseLLMParams);
58
32
  /** Call out to Bedrock service model.
59
33
  Arguments:
60
34
  prompt: The prompt to pass into the model.
@@ -71,4 +45,3 @@ export declare class Bedrock extends LLM implements BedrockInput {
71
45
  [Symbol.asyncIterator](): AsyncGenerator<any, void, unknown>;
72
46
  };
73
47
  }
74
- export {};
@@ -1,64 +1,17 @@
1
- import { SignatureV4 } from "@aws-sdk/signature-v4";
1
+ import { SignatureV4 } from "@smithy/signature-v4";
2
2
  import { defaultProvider } from "@aws-sdk/credential-provider-node";
3
- import { HttpRequest } from "@aws-sdk/protocol-http";
3
+ import { HttpRequest } from "@smithy/protocol-http";
4
4
  import { EventStreamCodec } from "@smithy/eventstream-codec";
5
5
  import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
6
6
  import { Sha256 } from "@aws-crypto/sha256-js";
7
+ import { BedrockLLMInputOutputAdapter, } from "../util/bedrock.js";
7
8
  import { getEnvironmentVariable } from "../util/env.js";
8
9
  import { LLM } from "./base.js";
9
10
  import { GenerationChunk } from "../schema/index.js";
10
- /**
11
- * A helper class used within the `Bedrock` class. It is responsible for
12
- * preparing the input and output for the Bedrock service. It formats the
13
- * input prompt based on the provider (e.g., "anthropic", "ai21",
14
- * "amazon") and extracts the generated text from the service response.
15
- */
16
- class BedrockLLMInputOutputAdapter {
17
- /** Adapter class to prepare the inputs from Langchain to a format
18
- that LLM model expects. Also, provides a helper function to extract
19
- the generated text from the model response. */
20
- static prepareInput(provider, prompt, maxTokens = 50, temperature = 0) {
21
- const inputBody = {};
22
- if (provider === "anthropic") {
23
- inputBody.prompt = prompt;
24
- inputBody.max_tokens_to_sample = maxTokens;
25
- inputBody.temperature = temperature;
26
- }
27
- else if (provider === "ai21") {
28
- inputBody.prompt = prompt;
29
- inputBody.maxTokens = maxTokens;
30
- inputBody.temperature = temperature;
31
- }
32
- else if (provider === "amazon") {
33
- inputBody.inputText = prompt;
34
- inputBody.textGenerationConfig = {
35
- maxTokenCount: maxTokens,
36
- temperature,
37
- };
38
- }
39
- return inputBody;
40
- }
41
- /**
42
- * Extracts the generated text from the service response.
43
- * @param provider The provider name.
44
- * @param responseBody The response body from the service.
45
- * @returns The generated text.
46
- */
47
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
48
- static prepareOutput(provider, responseBody) {
49
- if (provider === "anthropic") {
50
- return responseBody.completion;
51
- }
52
- else if (provider === "ai21") {
53
- return responseBody.data.text;
54
- }
55
- return responseBody.outputText;
56
- }
57
- }
58
11
  /**
59
12
  * A type of Large Language Model (LLM) that interacts with the Bedrock
60
13
  * service. It extends the base `LLM` class and implements the
61
- * `BedrockInput` interface. The class is designed to authenticate and
14
+ * `BaseBedrockInput` interface. The class is designed to authenticate and
62
15
  * interact with the Bedrock service, which is a part of Amazon Web
63
16
  * Services (AWS). It uses AWS credentials for authentication and can be
64
17
  * configured with various parameters such as the model to use, the AWS
@@ -71,6 +24,9 @@ export class Bedrock extends LLM {
71
24
  _llmType() {
72
25
  return "bedrock";
73
26
  }
27
+ static lc_name() {
28
+ return "Bedrock";
29
+ }
74
30
  constructor(fields) {
75
31
  super(fields ?? {});
76
32
  Object.defineProperty(this, "model", {
@@ -109,7 +65,19 @@ export class Bedrock extends LLM {
109
65
  writable: true,
110
66
  value: void 0
111
67
  });
112
- Object.defineProperty(this, "endpointUrl", {
68
+ Object.defineProperty(this, "endpointHost", {
69
+ enumerable: true,
70
+ configurable: true,
71
+ writable: true,
72
+ value: void 0
73
+ });
74
+ Object.defineProperty(this, "stopSequences", {
75
+ enumerable: true,
76
+ configurable: true,
77
+ writable: true,
78
+ value: void 0
79
+ });
80
+ Object.defineProperty(this, "modelKwargs", {
113
81
  enumerable: true,
114
82
  configurable: true,
115
83
  writable: true,
@@ -135,7 +103,9 @@ export class Bedrock extends LLM {
135
103
  this.temperature = fields?.temperature ?? this.temperature;
136
104
  this.maxTokens = fields?.maxTokens ?? this.maxTokens;
137
105
  this.fetchFn = fields?.fetchFn ?? fetch;
138
- this.endpointUrl = fields?.endpointUrl;
106
+ this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
107
+ this.stopSequences = fields?.stopSequences;
108
+ this.modelKwargs = fields?.modelKwargs;
139
109
  }
140
110
  /** Call out to Bedrock service model.
141
111
  Arguments:
@@ -156,10 +126,11 @@ export class Bedrock extends LLM {
156
126
  }
157
127
  async *_streamResponseChunks(prompt, options, runManager) {
158
128
  const provider = this.model.split(".")[0];
159
- const service = "bedrock";
160
- const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature);
161
- const endpointUrl = this.endpointUrl ?? `${service}.${this.region}.amazonaws.com`;
162
- const url = new URL(`https://${endpointUrl}/model/${this.model}/invoke-with-response-stream`);
129
+ const service = "bedrock-runtime";
130
+ const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature, this.stopSequences, this.modelKwargs);
131
+ const endpointHost = this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
132
+ const amazonMethod = provider === "anthropic" ? "invoke-with-response-stream" : "invoke";
133
+ const url = new URL(`https://${endpointHost}/model/${this.model}/${amazonMethod}`);
163
134
  const request = new HttpRequest({
164
135
  hostname: url.hostname,
165
136
  path: url.pathname,
@@ -171,12 +142,12 @@ export class Bedrock extends LLM {
171
142
  // host is required by AWS Signature V4: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
172
143
  host: url.host,
173
144
  accept: "application/json",
174
- "Content-Type": "application/json",
145
+ "content-type": "application/json",
175
146
  },
176
147
  });
177
148
  const signer = new SignatureV4({
178
149
  credentials: this.credentials,
179
- service,
150
+ service: "bedrock",
180
151
  region: this.region,
181
152
  sha256: Sha256,
182
153
  });
@@ -190,15 +161,34 @@ export class Bedrock extends LLM {
190
161
  if (response.status < 200 || response.status >= 300) {
191
162
  throw Error(`Failed to access underlying url '${url}': got ${response.status} ${response.statusText}: ${await response.text()}`);
192
163
  }
193
- const reader = response.body?.getReader();
194
- for await (const chunk of this._readChunks(reader)) {
195
- const event = this.codec.decode(chunk);
196
- if (event.headers[":event-type"].value !== "chunk" ||
197
- event.headers[":content-type"].value !== "application/json") {
198
- throw Error(`Failed to get event chunk: got ${chunk}`);
164
+ if (provider === "anthropic") {
165
+ const reader = response.body?.getReader();
166
+ const decoder = new TextDecoder();
167
+ for await (const chunk of this._readChunks(reader)) {
168
+ const event = this.codec.decode(chunk);
169
+ if ((event.headers[":event-type"] !== undefined &&
170
+ event.headers[":event-type"].value !== "chunk") ||
171
+ event.headers[":content-type"].value !== "application/json") {
172
+ throw Error(`Failed to get event chunk: got ${chunk}`);
173
+ }
174
+ const body = JSON.parse(decoder.decode(event.body));
175
+ if (body.message) {
176
+ throw new Error(body.message);
177
+ }
178
+ if (body.bytes !== undefined) {
179
+ const chunkResult = JSON.parse(Buffer.from(body.bytes, "base64").toString());
180
+ const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, chunkResult);
181
+ yield new GenerationChunk({
182
+ text,
183
+ generationInfo: {},
184
+ });
185
+ await runManager?.handleLLMNewToken(text);
186
+ }
199
187
  }
200
- const body = JSON.parse(Buffer.from(JSON.parse(new TextDecoder("utf-8").decode(event.body)).bytes, "base64").toString());
201
- const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, body);
188
+ }
189
+ else {
190
+ const json = await response.json();
191
+ const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
202
192
  yield new GenerationChunk({
203
193
  text,
204
194
  generationInfo: {},
@@ -15,6 +15,7 @@ exports.optionalImportEntrypoints = [
15
15
  "langchain/chains/query_constructor",
16
16
  "langchain/chains/query_constructor/ir",
17
17
  "langchain/chains/sql_db",
18
+ "langchain/embeddings/cloudflare_workersai",
18
19
  "langchain/embeddings/cohere",
19
20
  "langchain/embeddings/tensorflow",
20
21
  "langchain/embeddings/hf",
@@ -36,6 +37,7 @@ exports.optionalImportEntrypoints = [
36
37
  "langchain/prompts/load",
37
38
  "langchain/vectorstores/analyticdb",
38
39
  "langchain/vectorstores/elasticsearch",
40
+ "langchain/vectorstores/cloudflare_vectorize",
39
41
  "langchain/vectorstores/chroma",
40
42
  "langchain/vectorstores/googlevertexai",
41
43
  "langchain/vectorstores/hnswlib",
@@ -57,6 +59,7 @@ exports.optionalImportEntrypoints = [
57
59
  "langchain/vectorstores/singlestore",
58
60
  "langchain/vectorstores/tigris",
59
61
  "langchain/vectorstores/usearch",
62
+ "langchain/vectorstores/vercel_postgres",
60
63
  "langchain/vectorstores/voy",
61
64
  "langchain/vectorstores/zep",
62
65
  "langchain/memory/zep",
@@ -95,6 +98,7 @@ exports.optionalImportEntrypoints = [
95
98
  "langchain/document_loaders/fs/openai_whisper_audio",
96
99
  "langchain/document_transformers/html_to_text",
97
100
  "langchain/document_transformers/mozilla_readability",
101
+ "langchain/chat_models/bedrock",
98
102
  "langchain/chat_models/googlevertexai",
99
103
  "langchain/chat_models/googlevertexai/web",
100
104
  "langchain/chat_models/googlepalm",