langchain 0.0.155 → 0.0.157

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/chains/graph_qa/cypher.cjs +1 -0
  2. package/chains/graph_qa/cypher.d.ts +1 -0
  3. package/chains/graph_qa/cypher.js +1 -0
  4. package/chat_models/bedrock.cjs +1 -0
  5. package/chat_models/bedrock.d.ts +1 -0
  6. package/chat_models/bedrock.js +1 -0
  7. package/dist/agents/index.d.ts +12 -12
  8. package/dist/agents/toolkits/index.d.ts +2 -2
  9. package/dist/cache/upstash_redis.cjs +1 -1
  10. package/dist/cache/upstash_redis.js +1 -1
  11. package/dist/callbacks/index.d.ts +3 -3
  12. package/dist/chains/graph_qa/cypher.cjs +151 -0
  13. package/dist/chains/graph_qa/cypher.d.ts +45 -0
  14. package/dist/chains/graph_qa/cypher.js +147 -0
  15. package/dist/chains/graph_qa/prompts.cjs +34 -0
  16. package/dist/chains/graph_qa/prompts.d.ts +9 -0
  17. package/dist/chains/graph_qa/prompts.js +31 -0
  18. package/dist/chains/index.d.ts +19 -19
  19. package/dist/chains/index.js +2 -2
  20. package/dist/chains/openai_functions/index.d.ts +3 -3
  21. package/dist/chains/query_constructor/index.d.ts +1 -1
  22. package/dist/chat_models/bedrock.cjs +260 -0
  23. package/dist/chat_models/bedrock.d.ts +58 -0
  24. package/dist/chat_models/bedrock.js +254 -0
  25. package/dist/chat_models/googlevertexai/index.cjs +1 -3
  26. package/dist/chat_models/googlevertexai/index.d.ts +1 -1
  27. package/dist/chat_models/googlevertexai/index.js +0 -1
  28. package/dist/chat_models/googlevertexai/web.cjs +1 -3
  29. package/dist/chat_models/googlevertexai/web.d.ts +1 -1
  30. package/dist/chat_models/googlevertexai/web.js +0 -1
  31. package/dist/chat_models/openai.d.ts +1 -1
  32. package/dist/embeddings/cloudflare_workersai.cjs +69 -0
  33. package/dist/embeddings/cloudflare_workersai.d.ts +28 -0
  34. package/dist/embeddings/cloudflare_workersai.js +65 -0
  35. package/dist/experimental/autogpt/index.d.ts +3 -3
  36. package/dist/experimental/babyagi/index.d.ts +1 -1
  37. package/dist/experimental/plan_and_execute/index.d.ts +1 -1
  38. package/dist/graphs/neo4j_graph.cjs +112 -0
  39. package/dist/graphs/neo4j_graph.d.ts +18 -0
  40. package/dist/graphs/neo4j_graph.js +105 -0
  41. package/dist/llms/bedrock.cjs +57 -67
  42. package/dist/llms/bedrock.d.ts +8 -35
  43. package/dist/llms/bedrock.js +57 -67
  44. package/dist/llms/openai-chat.d.ts +1 -1
  45. package/dist/llms/openai.d.ts +1 -1
  46. package/dist/load/import_constants.cjs +5 -0
  47. package/dist/load/import_constants.js +5 -0
  48. package/dist/memory/index.d.ts +8 -8
  49. package/dist/memory/index.js +1 -1
  50. package/dist/output_parsers/index.d.ts +3 -3
  51. package/dist/prompts/index.d.ts +8 -8
  52. package/dist/retrievers/remote/index.d.ts +3 -3
  53. package/dist/schema/runnable/index.d.ts +3 -3
  54. package/dist/sql_db.d.ts +1 -1
  55. package/dist/tools/index.d.ts +12 -12
  56. package/dist/util/bedrock.cjs +54 -0
  57. package/dist/util/bedrock.d.ts +59 -0
  58. package/dist/util/bedrock.js +50 -0
  59. package/dist/vectorstores/cloudflare_vectorize.cjs +200 -0
  60. package/dist/vectorstores/cloudflare_vectorize.d.ts +90 -0
  61. package/dist/vectorstores/cloudflare_vectorize.js +173 -0
  62. package/dist/vectorstores/supabase.d.ts +1 -1
  63. package/embeddings/cloudflare_workersai.cjs +1 -0
  64. package/embeddings/cloudflare_workersai.d.ts +1 -0
  65. package/embeddings/cloudflare_workersai.js +1 -0
  66. package/graphs/neo4j_graph.cjs +1 -0
  67. package/graphs/neo4j_graph.d.ts +1 -0
  68. package/graphs/neo4j_graph.js +1 -0
  69. package/package.json +62 -14
  70. package/vectorstores/cloudflare_vectorize.cjs +1 -0
  71. package/vectorstores/cloudflare_vectorize.d.ts +1 -0
  72. package/vectorstores/cloudflare_vectorize.js +1 -0
@@ -0,0 +1,260 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ChatBedrock = exports.convertMessagesToPrompt = exports.convertMessagesToPromptAnthropic = void 0;
4
+ const signature_v4_1 = require("@smithy/signature-v4");
5
+ const credential_provider_node_1 = require("@aws-sdk/credential-provider-node");
6
+ const protocol_http_1 = require("@smithy/protocol-http");
7
+ const eventstream_codec_1 = require("@smithy/eventstream-codec");
8
+ const util_utf8_1 = require("@smithy/util-utf8");
9
+ const sha256_js_1 = require("@aws-crypto/sha256-js");
10
+ const bedrock_js_1 = require("../util/bedrock.cjs");
11
+ const env_js_1 = require("../util/env.cjs");
12
+ const base_js_1 = require("./base.cjs");
13
+ const index_js_1 = require("../schema/index.cjs");
14
/**
 * Renders one message as Anthropic-style prompt text. System messages are
 * emitted as a human turn wrapped in <admin> tags; generic ChatMessages are
 * rendered as "Role: content".
 * @throws If the message type is not recognized.
 */
function convertOneMessageToText(message, humanPrompt, aiPrompt) {
    if (message._getType() === "human") {
        return `${humanPrompt} ${message.content}`;
    }
    else if (message._getType() === "ai") {
        return `${aiPrompt} ${message.content}`;
    }
    else if (message._getType() === "system") {
        return `${humanPrompt} <admin>${message.content}</admin>`;
    }
    else if (index_js_1.ChatMessage.isInstance(message)) {
        // BUG FIX: the interpolation was missing its "$", so the literal
        // text "{message.content}" was emitted instead of the content.
        return `\n\n${message.role[0].toUpperCase() + message.role.slice(1)}: ${message.content}`;
    }
    throw new Error(`Unknown role: ${message._getType()}`);
}
/**
 * Converts an array of messages into a single Anthropic prompt string.
 * Appends an empty AI message when the conversation does not already end
 * with one, so the prompt always terminates with the assistant marker.
 * @param messages Array of messages to be converted.
 * @param humanPrompt Marker for human/system turns (default "\n\nHuman:").
 * @param aiPrompt Marker for assistant turns (default "\n\nAssistant:").
 * @returns A string prompt that can be used as input for a chat model.
 */
function convertMessagesToPromptAnthropic(messages, humanPrompt = "\n\nHuman:", aiPrompt = "\n\nAssistant:") {
    const messagesCopy = [...messages];
    if (messagesCopy.length === 0 ||
        messagesCopy[messagesCopy.length - 1]._getType() !== "ai") {
        messagesCopy.push(new index_js_1.AIMessage({ content: "" }));
    }
    return messagesCopy
        .map((message) => convertOneMessageToText(message, humanPrompt, aiPrompt))
        .join("");
}
exports.convertMessagesToPromptAnthropic = convertMessagesToPromptAnthropic;
/**
 * Function that converts an array of messages into a single string prompt
 * that can be used as input for a chat model. It delegates the conversion
 * logic to the appropriate provider-specific function.
 * @param messages Array of messages to be converted.
 * @param provider The Bedrock model provider (only "anthropic" supports chat).
 * @returns A string prompt that can be used as input for a chat model.
 * @throws If the provider does not support chat.
 */
function convertMessagesToPrompt(messages, provider) {
    if (provider === "anthropic") {
        return convertMessagesToPromptAnthropic(messages);
    }
    throw new Error(`Provider ${provider} does not support chat.`);
}
exports.convertMessagesToPrompt = convertMessagesToPrompt;
55
/**
 * A chat model that interacts with the Amazon Bedrock service. It extends
 * `SimpleChatModel`, signs every request with AWS Signature V4, and can be
 * configured with the model id, AWS region, credentials, maximum token
 * count, temperature, stop sequences and provider-specific model kwargs.
 * Anthropic models are streamed over the AWS event-stream protocol; other
 * providers are invoked with a single request.
 */
class ChatBedrock extends base_js_1.SimpleChatModel {
    get lc_secrets() {
        // Nothing is mapped to environment secrets for serialization.
        return {};
    }
    _llmType() {
        return "bedrock";
    }
    static lc_name() {
        return "ChatBedrock";
    }
    /**
     * @param fields Bedrock configuration. `region` falls back to the
     * AWS_DEFAULT_REGION environment variable and `credentials` to the
     * default AWS provider chain.
     */
    constructor(fields) {
        super(fields ?? {});
        // Instance fields are declared via Object.defineProperty (presumably
        // compiler-emitted) so they stay enumerable and writable.
        Object.defineProperty(this, "model", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "amazon.titan-tg1-large"
        });
        Object.defineProperty(this, "region", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "credentials", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "temperature", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: undefined
        });
        Object.defineProperty(this, "maxTokens", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: undefined
        });
        Object.defineProperty(this, "fetchFn", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "endpointHost", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "stopSequences", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "modelKwargs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "codec", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new eventstream_codec_1.EventStreamCodec(util_utf8_1.toUtf8, util_utf8_1.fromUtf8)
        });
        this.model = fields?.model ?? this.model;
        // Only these Bedrock providers are currently supported.
        const allowedModels = ["ai21", "anthropic", "amazon"];
        if (!allowedModels.includes(this.model.split(".")[0])) {
            throw new Error(`Unknown model: '${this.model}', only these are supported: ${allowedModels}`);
        }
        const region = fields?.region ?? (0, env_js_1.getEnvironmentVariable)("AWS_DEFAULT_REGION");
        if (!region) {
            throw new Error("Please set the AWS_DEFAULT_REGION environment variable or pass it to the constructor as the region field.");
        }
        this.region = region;
        this.credentials = fields?.credentials ?? (0, credential_provider_node_1.defaultProvider)();
        this.temperature = fields?.temperature ?? this.temperature;
        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
        this.fetchFn = fields?.fetchFn ?? fetch;
        this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
        this.stopSequences = fields?.stopSequences;
        this.modelKwargs = fields?.modelKwargs;
    }
    /**
     * Call out to the Bedrock service model. Streams the response and joins
     * the chunk texts into the final completion string.
     * @param messages The messages to pass into the model.
     * @returns The string generated by the model.
     */
    async _call(messages, options, runManager) {
        const chunks = [];
        for await (const chunk of this._streamResponseChunks(messages, options, runManager)) {
            chunks.push(chunk);
        }
        return chunks.map((chunk) => chunk.text).join("");
    }
    async *_streamResponseChunks(messages, options, runManager) {
        const provider = this.model.split(".")[0];
        const service = "bedrock-runtime";
        const inputBody = bedrock_js_1.BedrockLLMInputOutputAdapter.prepareInput(provider, convertMessagesToPromptAnthropic(messages), this.maxTokens, this.temperature, this.stopSequences, this.modelKwargs);
        const endpointHost = this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
        // Only Anthropic models go through the streaming endpoint here.
        const amazonMethod = provider === "anthropic" ? "invoke-with-response-stream" : "invoke";
        const url = new URL(`https://${endpointHost}/model/${this.model}/${amazonMethod}`);
        const request = new protocol_http_1.HttpRequest({
            hostname: url.hostname,
            path: url.pathname,
            protocol: url.protocol,
            method: "POST",
            body: JSON.stringify(inputBody),
            query: Object.fromEntries(url.searchParams.entries()),
            headers: {
                // host is required by AWS Signature V4: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
                host: url.host,
                accept: "application/json",
                "content-type": "application/json",
            },
        });
        const signer = new signature_v4_1.SignatureV4({
            credentials: this.credentials,
            service: "bedrock",
            region: this.region,
            sha256: sha256_js_1.Sha256,
        });
        const signedRequest = await signer.sign(request);
        // Send request to AWS using the low-level fetch API
        const response = await this.caller.callWithOptions({ signal: options.signal }, async () => this.fetchFn(url, {
            headers: signedRequest.headers,
            body: signedRequest.body,
            method: signedRequest.method,
        }));
        if (response.status < 200 || response.status >= 300) {
            throw new Error(`Failed to access underlying url '${url}': got ${response.status} ${response.statusText}: ${await response.text()}`);
        }
        if (provider === "anthropic") {
            const reader = response.body?.getReader();
            // BUG FIX: fail with a clear message when the platform fetch
            // returns no readable body instead of a TypeError in _readChunks.
            if (reader === undefined) {
                throw new Error(`Response body is missing for streaming url '${url}'`);
            }
            const decoder = new TextDecoder();
            for await (const chunk of this._readChunks(reader)) {
                const event = this.codec.decode(chunk);
                // BUG FIX: optional-chain ":content-type" so a missing header
                // raises the descriptive error below rather than a TypeError.
                if ((event.headers[":event-type"] !== undefined &&
                    event.headers[":event-type"].value !== "chunk") ||
                    event.headers[":content-type"]?.value !== "application/json") {
                    throw new Error(`Failed to get event chunk: got ${chunk}`);
                }
                const body = JSON.parse(decoder.decode(event.body));
                // Service-level errors arrive as JSON with a "message" field.
                if (body.message) {
                    throw new Error(body.message);
                }
                if (body.bytes !== undefined) {
                    const chunkResult = JSON.parse(Buffer.from(body.bytes, "base64").toString());
                    const text = bedrock_js_1.BedrockLLMInputOutputAdapter.prepareOutput(provider, chunkResult);
                    yield new index_js_1.ChatGenerationChunk({
                        text,
                        message: new index_js_1.AIMessageChunk({ content: text }),
                    });
                    await runManager?.handleLLMNewToken(text);
                }
            }
        }
        else {
            // Non-streaming providers: emit the whole completion as one chunk.
            const json = await response.json();
            const text = bedrock_js_1.BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
            yield new index_js_1.ChatGenerationChunk({
                text,
                message: new index_js_1.AIMessageChunk({ content: text }),
            });
            await runManager?.handleLLMNewToken(text);
        }
    }
    /** Wraps a ReadableStream reader in an async iterable of byte chunks. */
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    _readChunks(reader) {
        return {
            async *[Symbol.asyncIterator]() {
                let readResult = await reader.read();
                while (!readResult.done) {
                    yield readResult.value;
                    readResult = await reader.read();
                }
            },
        };
    }
    _combineLLMOutput() {
        return {};
    }
}
exports.ChatBedrock = ChatBedrock;
@@ -0,0 +1,58 @@
1
+ import { EventStreamCodec } from "@smithy/eventstream-codec";
2
+ import { BaseBedrockInput, type CredentialType } from "../util/bedrock.js";
3
+ import { SimpleChatModel, BaseChatModelParams } from "./base.js";
4
+ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
5
+ import { BaseMessage, ChatGenerationChunk } from "../schema/index.js";
6
+ export declare function convertMessagesToPromptAnthropic(messages: BaseMessage[], humanPrompt?: string, aiPrompt?: string): string;
7
+ /**
8
+ * Function that converts an array of messages into a single string prompt
9
+ * that can be used as input for a chat model. It delegates the conversion
10
+ * logic to the appropriate provider-specific function.
11
+ * @param messages Array of messages to be converted.
12
+ * @param options Options to be used during the conversion.
13
+ * @returns A string prompt that can be used as input for a chat model.
14
+ */
15
+ export declare function convertMessagesToPrompt(messages: BaseMessage[], provider: string): string;
16
+ /**
17
+ * A type of Large Language Model (LLM) that interacts with the Bedrock
18
+ * service. It extends the base `LLM` class and implements the
19
+ * `BaseBedrockInput` interface. The class is designed to authenticate and
20
+ * interact with the Bedrock service, which is a part of Amazon Web
21
+ * Services (AWS). It uses AWS credentials for authentication and can be
22
+ * configured with various parameters such as the model to use, the AWS
23
+ * region, and the maximum number of tokens to generate.
24
+ */
25
+ export declare class ChatBedrock extends SimpleChatModel implements BaseBedrockInput {
26
+ model: string;
27
+ region: string;
28
+ credentials: CredentialType;
29
+ temperature?: number | undefined;
30
+ maxTokens?: number | undefined;
31
+ fetchFn: typeof fetch;
32
+ endpointHost?: string;
33
+ stopSequences?: string[];
34
+ modelKwargs?: Record<string, unknown>;
35
+ codec: EventStreamCodec;
36
+ get lc_secrets(): {
37
+ [key: string]: string;
38
+ } | undefined;
39
+ _llmType(): string;
40
+ static lc_name(): string;
41
+ constructor(fields?: Partial<BaseBedrockInput> & BaseChatModelParams);
42
+ /** Call out to Bedrock service model.
43
+ Arguments:
44
+ prompt: The prompt to pass into the model.
45
+
46
+ Returns:
47
+ The string generated by the model.
48
+
49
+ Example:
50
+ response = model.call("Tell me a joke.")
51
+ */
52
+ _call(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
53
+ _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
54
+ _readChunks(reader: any): {
55
+ [Symbol.asyncIterator](): AsyncGenerator<any, void, unknown>;
56
+ };
57
+ _combineLLMOutput(): {};
58
+ }
@@ -0,0 +1,254 @@
1
+ import { SignatureV4 } from "@smithy/signature-v4";
2
+ import { defaultProvider } from "@aws-sdk/credential-provider-node";
3
+ import { HttpRequest } from "@smithy/protocol-http";
4
+ import { EventStreamCodec } from "@smithy/eventstream-codec";
5
+ import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
6
+ import { Sha256 } from "@aws-crypto/sha256-js";
7
+ import { BedrockLLMInputOutputAdapter, } from "../util/bedrock.js";
8
+ import { getEnvironmentVariable } from "../util/env.js";
9
+ import { SimpleChatModel } from "./base.js";
10
+ import { AIMessageChunk, AIMessage, ChatGenerationChunk, ChatMessage, } from "../schema/index.js";
11
/**
 * Renders one message as Anthropic-style prompt text. System messages are
 * emitted as a human turn wrapped in <admin> tags; generic ChatMessages are
 * rendered as "Role: content".
 * @throws If the message type is not recognized.
 */
function convertOneMessageToText(message, humanPrompt, aiPrompt) {
    if (message._getType() === "human") {
        return `${humanPrompt} ${message.content}`;
    }
    else if (message._getType() === "ai") {
        return `${aiPrompt} ${message.content}`;
    }
    else if (message._getType() === "system") {
        return `${humanPrompt} <admin>${message.content}</admin>`;
    }
    else if (ChatMessage.isInstance(message)) {
        // BUG FIX: the interpolation was missing its "$", so the literal
        // text "{message.content}" was emitted instead of the content.
        return `\n\n${message.role[0].toUpperCase() + message.role.slice(1)}: ${message.content}`;
    }
    throw new Error(`Unknown role: ${message._getType()}`);
}
/**
 * Converts an array of messages into a single Anthropic prompt string.
 * Appends an empty AI message when the conversation does not already end
 * with one, so the prompt always terminates with the assistant marker.
 * @param messages Array of messages to be converted.
 * @param humanPrompt Marker for human/system turns (default "\n\nHuman:").
 * @param aiPrompt Marker for assistant turns (default "\n\nAssistant:").
 * @returns A string prompt that can be used as input for a chat model.
 */
export function convertMessagesToPromptAnthropic(messages, humanPrompt = "\n\nHuman:", aiPrompt = "\n\nAssistant:") {
    const messagesCopy = [...messages];
    if (messagesCopy.length === 0 ||
        messagesCopy[messagesCopy.length - 1]._getType() !== "ai") {
        messagesCopy.push(new AIMessage({ content: "" }));
    }
    return messagesCopy
        .map((message) => convertOneMessageToText(message, humanPrompt, aiPrompt))
        .join("");
}
/**
 * Function that converts an array of messages into a single string prompt
 * that can be used as input for a chat model. It delegates the conversion
 * logic to the appropriate provider-specific function.
 * @param messages Array of messages to be converted.
 * @param provider The Bedrock model provider (only "anthropic" supports chat).
 * @returns A string prompt that can be used as input for a chat model.
 * @throws If the provider does not support chat.
 */
export function convertMessagesToPrompt(messages, provider) {
    if (provider === "anthropic") {
        return convertMessagesToPromptAnthropic(messages);
    }
    throw new Error(`Provider ${provider} does not support chat.`);
}
50
/**
 * A chat model that interacts with the Amazon Bedrock service. It extends
 * `SimpleChatModel`, signs every request with AWS Signature V4, and can be
 * configured with the model id, AWS region, credentials, maximum token
 * count, temperature, stop sequences and provider-specific model kwargs.
 * Anthropic models are streamed over the AWS event-stream protocol; other
 * providers are invoked with a single request.
 */
export class ChatBedrock extends SimpleChatModel {
    get lc_secrets() {
        // Nothing is mapped to environment secrets for serialization.
        return {};
    }
    _llmType() {
        return "bedrock";
    }
    static lc_name() {
        return "ChatBedrock";
    }
    /**
     * @param fields Bedrock configuration. `region` falls back to the
     * AWS_DEFAULT_REGION environment variable and `credentials` to the
     * default AWS provider chain.
     */
    constructor(fields) {
        super(fields ?? {});
        // Instance fields are declared via Object.defineProperty (presumably
        // compiler-emitted) so they stay enumerable and writable.
        Object.defineProperty(this, "model", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "amazon.titan-tg1-large"
        });
        Object.defineProperty(this, "region", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "credentials", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "temperature", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: undefined
        });
        Object.defineProperty(this, "maxTokens", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: undefined
        });
        Object.defineProperty(this, "fetchFn", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "endpointHost", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "stopSequences", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "modelKwargs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "codec", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new EventStreamCodec(toUtf8, fromUtf8)
        });
        this.model = fields?.model ?? this.model;
        // Only these Bedrock providers are currently supported.
        const allowedModels = ["ai21", "anthropic", "amazon"];
        if (!allowedModels.includes(this.model.split(".")[0])) {
            throw new Error(`Unknown model: '${this.model}', only these are supported: ${allowedModels}`);
        }
        const region = fields?.region ?? getEnvironmentVariable("AWS_DEFAULT_REGION");
        if (!region) {
            throw new Error("Please set the AWS_DEFAULT_REGION environment variable or pass it to the constructor as the region field.");
        }
        this.region = region;
        this.credentials = fields?.credentials ?? defaultProvider();
        this.temperature = fields?.temperature ?? this.temperature;
        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
        this.fetchFn = fields?.fetchFn ?? fetch;
        this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
        this.stopSequences = fields?.stopSequences;
        this.modelKwargs = fields?.modelKwargs;
    }
    /**
     * Call out to the Bedrock service model. Streams the response and joins
     * the chunk texts into the final completion string.
     * @param messages The messages to pass into the model.
     * @returns The string generated by the model.
     */
    async _call(messages, options, runManager) {
        const chunks = [];
        for await (const chunk of this._streamResponseChunks(messages, options, runManager)) {
            chunks.push(chunk);
        }
        return chunks.map((chunk) => chunk.text).join("");
    }
    async *_streamResponseChunks(messages, options, runManager) {
        const provider = this.model.split(".")[0];
        const service = "bedrock-runtime";
        const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, convertMessagesToPromptAnthropic(messages), this.maxTokens, this.temperature, this.stopSequences, this.modelKwargs);
        const endpointHost = this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
        // Only Anthropic models go through the streaming endpoint here.
        const amazonMethod = provider === "anthropic" ? "invoke-with-response-stream" : "invoke";
        const url = new URL(`https://${endpointHost}/model/${this.model}/${amazonMethod}`);
        const request = new HttpRequest({
            hostname: url.hostname,
            path: url.pathname,
            protocol: url.protocol,
            method: "POST",
            body: JSON.stringify(inputBody),
            query: Object.fromEntries(url.searchParams.entries()),
            headers: {
                // host is required by AWS Signature V4: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
                host: url.host,
                accept: "application/json",
                "content-type": "application/json",
            },
        });
        const signer = new SignatureV4({
            credentials: this.credentials,
            service: "bedrock",
            region: this.region,
            sha256: Sha256,
        });
        const signedRequest = await signer.sign(request);
        // Send request to AWS using the low-level fetch API
        const response = await this.caller.callWithOptions({ signal: options.signal }, async () => this.fetchFn(url, {
            headers: signedRequest.headers,
            body: signedRequest.body,
            method: signedRequest.method,
        }));
        if (response.status < 200 || response.status >= 300) {
            throw new Error(`Failed to access underlying url '${url}': got ${response.status} ${response.statusText}: ${await response.text()}`);
        }
        if (provider === "anthropic") {
            const reader = response.body?.getReader();
            // BUG FIX: fail with a clear message when the platform fetch
            // returns no readable body instead of a TypeError in _readChunks.
            if (reader === undefined) {
                throw new Error(`Response body is missing for streaming url '${url}'`);
            }
            const decoder = new TextDecoder();
            for await (const chunk of this._readChunks(reader)) {
                const event = this.codec.decode(chunk);
                // BUG FIX: optional-chain ":content-type" so a missing header
                // raises the descriptive error below rather than a TypeError.
                if ((event.headers[":event-type"] !== undefined &&
                    event.headers[":event-type"].value !== "chunk") ||
                    event.headers[":content-type"]?.value !== "application/json") {
                    throw new Error(`Failed to get event chunk: got ${chunk}`);
                }
                const body = JSON.parse(decoder.decode(event.body));
                // Service-level errors arrive as JSON with a "message" field.
                if (body.message) {
                    throw new Error(body.message);
                }
                if (body.bytes !== undefined) {
                    const chunkResult = JSON.parse(Buffer.from(body.bytes, "base64").toString());
                    const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, chunkResult);
                    yield new ChatGenerationChunk({
                        text,
                        message: new AIMessageChunk({ content: text }),
                    });
                    await runManager?.handleLLMNewToken(text);
                }
            }
        }
        else {
            // Non-streaming providers: emit the whole completion as one chunk.
            const json = await response.json();
            const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
            yield new ChatGenerationChunk({
                text,
                message: new AIMessageChunk({ content: text }),
            });
            await runManager?.handleLLMNewToken(text);
        }
    }
    /** Wraps a ReadableStream reader in an async iterable of byte chunks. */
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    _readChunks(reader) {
        return {
            async *[Symbol.asyncIterator]() {
                let readResult = await reader.read();
                while (!readResult.done) {
                    yield readResult.value;
                    readResult = await reader.read();
                }
            },
        };
    }
    _combineLLMOutput() {
        return {};
    }
}
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.GoogleVertexAIChatMessage = exports.ChatGoogleVertexAI = void 0;
3
+ exports.ChatGoogleVertexAI = void 0;
4
4
  const google_auth_library_1 = require("google-auth-library");
5
5
  const common_js_1 = require("./common.cjs");
6
6
  const googlevertexai_connection_js_1 = require("../../util/googlevertexai-connection.cjs");
@@ -32,5 +32,3 @@ class ChatGoogleVertexAI extends common_js_1.BaseChatGoogleVertexAI {
32
32
  }
33
33
  }
34
34
  exports.ChatGoogleVertexAI = ChatGoogleVertexAI;
35
- var common_js_2 = require("./common.cjs");
36
- Object.defineProperty(exports, "GoogleVertexAIChatMessage", { enumerable: true, get: function () { return common_js_2.GoogleVertexAIChatMessage; } });
@@ -18,4 +18,4 @@ export declare class ChatGoogleVertexAI extends BaseChatGoogleVertexAI<GoogleAut
18
18
  static lc_name(): string;
19
19
  constructor(fields?: GoogleVertexAIChatInput<GoogleAuthOptions>);
20
20
  }
21
- export { ChatExample, GoogleVertexAIChatAuthor, GoogleVertexAIChatInput, GoogleVertexAIChatInstance, GoogleVertexAIChatMessage, GoogleVertexAIChatMessageFields, GoogleVertexAIChatPrediction, } from "./common.js";
21
+ export type { ChatExample, GoogleVertexAIChatAuthor, GoogleVertexAIChatInput, GoogleVertexAIChatInstance, GoogleVertexAIChatMessage, GoogleVertexAIChatMessageFields, GoogleVertexAIChatPrediction, } from "./common.js";
@@ -28,4 +28,3 @@ export class ChatGoogleVertexAI extends BaseChatGoogleVertexAI {
28
28
  this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
29
29
  }
30
30
  }
31
- export { GoogleVertexAIChatMessage, } from "./common.js";
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.GoogleVertexAIChatMessage = exports.ChatGoogleVertexAI = void 0;
3
+ exports.ChatGoogleVertexAI = void 0;
4
4
  const googlevertexai_connection_js_1 = require("../../util/googlevertexai-connection.cjs");
5
5
  const googlevertexai_webauth_js_1 = require("../../util/googlevertexai-webauth.cjs");
6
6
  const common_js_1 = require("./common.cjs");
@@ -29,5 +29,3 @@ class ChatGoogleVertexAI extends common_js_1.BaseChatGoogleVertexAI {
29
29
  }
30
30
  }
31
31
  exports.ChatGoogleVertexAI = ChatGoogleVertexAI;
32
- var common_js_2 = require("./common.cjs");
33
- Object.defineProperty(exports, "GoogleVertexAIChatMessage", { enumerable: true, get: function () { return common_js_2.GoogleVertexAIChatMessage; } });
@@ -16,4 +16,4 @@ export declare class ChatGoogleVertexAI extends BaseChatGoogleVertexAI<WebGoogle
16
16
  };
17
17
  constructor(fields?: GoogleVertexAIChatInput<WebGoogleAuthOptions>);
18
18
  }
19
- export { ChatExample, GoogleVertexAIChatAuthor, GoogleVertexAIChatInput, GoogleVertexAIChatInstance, GoogleVertexAIChatMessage, GoogleVertexAIChatMessageFields, GoogleVertexAIChatPrediction, } from "./common.js";
19
+ export type { ChatExample, GoogleVertexAIChatAuthor, GoogleVertexAIChatInput, GoogleVertexAIChatInstance, GoogleVertexAIChatMessage, GoogleVertexAIChatMessageFields, GoogleVertexAIChatPrediction, } from "./common.js";
@@ -25,4 +25,3 @@ export class ChatGoogleVertexAI extends BaseChatGoogleVertexAI {
25
25
  this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
26
26
  }
27
27
  }
28
- export { GoogleVertexAIChatMessage, } from "./common.js";
@@ -5,7 +5,7 @@ import { StructuredTool } from "../tools/base.js";
5
5
  import { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput, OpenAICoreRequestOptions, LegacyOpenAIInput } from "../types/openai-types.js";
6
6
  import { BaseChatModel, BaseChatModelParams } from "./base.js";
7
7
  import { BaseFunctionCallOptions } from "../base_language/index.js";
8
- export { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput };
8
+ export type { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput };
9
9
  interface TokenUsage {
10
10
  completionTokens?: number;
11
11
  promptTokens?: number;