langchain 0.0.155 → 0.0.157

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/chains/graph_qa/cypher.cjs +1 -0
  2. package/chains/graph_qa/cypher.d.ts +1 -0
  3. package/chains/graph_qa/cypher.js +1 -0
  4. package/chat_models/bedrock.cjs +1 -0
  5. package/chat_models/bedrock.d.ts +1 -0
  6. package/chat_models/bedrock.js +1 -0
  7. package/dist/agents/index.d.ts +12 -12
  8. package/dist/agents/toolkits/index.d.ts +2 -2
  9. package/dist/cache/upstash_redis.cjs +1 -1
  10. package/dist/cache/upstash_redis.js +1 -1
  11. package/dist/callbacks/index.d.ts +3 -3
  12. package/dist/chains/graph_qa/cypher.cjs +151 -0
  13. package/dist/chains/graph_qa/cypher.d.ts +45 -0
  14. package/dist/chains/graph_qa/cypher.js +147 -0
  15. package/dist/chains/graph_qa/prompts.cjs +34 -0
  16. package/dist/chains/graph_qa/prompts.d.ts +9 -0
  17. package/dist/chains/graph_qa/prompts.js +31 -0
  18. package/dist/chains/index.d.ts +19 -19
  19. package/dist/chains/index.js +2 -2
  20. package/dist/chains/openai_functions/index.d.ts +3 -3
  21. package/dist/chains/query_constructor/index.d.ts +1 -1
  22. package/dist/chat_models/bedrock.cjs +260 -0
  23. package/dist/chat_models/bedrock.d.ts +58 -0
  24. package/dist/chat_models/bedrock.js +254 -0
  25. package/dist/chat_models/googlevertexai/index.cjs +1 -3
  26. package/dist/chat_models/googlevertexai/index.d.ts +1 -1
  27. package/dist/chat_models/googlevertexai/index.js +0 -1
  28. package/dist/chat_models/googlevertexai/web.cjs +1 -3
  29. package/dist/chat_models/googlevertexai/web.d.ts +1 -1
  30. package/dist/chat_models/googlevertexai/web.js +0 -1
  31. package/dist/chat_models/openai.d.ts +1 -1
  32. package/dist/embeddings/cloudflare_workersai.cjs +69 -0
  33. package/dist/embeddings/cloudflare_workersai.d.ts +28 -0
  34. package/dist/embeddings/cloudflare_workersai.js +65 -0
  35. package/dist/experimental/autogpt/index.d.ts +3 -3
  36. package/dist/experimental/babyagi/index.d.ts +1 -1
  37. package/dist/experimental/plan_and_execute/index.d.ts +1 -1
  38. package/dist/graphs/neo4j_graph.cjs +112 -0
  39. package/dist/graphs/neo4j_graph.d.ts +18 -0
  40. package/dist/graphs/neo4j_graph.js +105 -0
  41. package/dist/llms/bedrock.cjs +57 -67
  42. package/dist/llms/bedrock.d.ts +8 -35
  43. package/dist/llms/bedrock.js +57 -67
  44. package/dist/llms/openai-chat.d.ts +1 -1
  45. package/dist/llms/openai.d.ts +1 -1
  46. package/dist/load/import_constants.cjs +5 -0
  47. package/dist/load/import_constants.js +5 -0
  48. package/dist/memory/index.d.ts +8 -8
  49. package/dist/memory/index.js +1 -1
  50. package/dist/output_parsers/index.d.ts +3 -3
  51. package/dist/prompts/index.d.ts +8 -8
  52. package/dist/retrievers/remote/index.d.ts +3 -3
  53. package/dist/schema/runnable/index.d.ts +3 -3
  54. package/dist/sql_db.d.ts +1 -1
  55. package/dist/tools/index.d.ts +12 -12
  56. package/dist/util/bedrock.cjs +54 -0
  57. package/dist/util/bedrock.d.ts +59 -0
  58. package/dist/util/bedrock.js +50 -0
  59. package/dist/vectorstores/cloudflare_vectorize.cjs +200 -0
  60. package/dist/vectorstores/cloudflare_vectorize.d.ts +90 -0
  61. package/dist/vectorstores/cloudflare_vectorize.js +173 -0
  62. package/dist/vectorstores/supabase.d.ts +1 -1
  63. package/embeddings/cloudflare_workersai.cjs +1 -0
  64. package/embeddings/cloudflare_workersai.d.ts +1 -0
  65. package/embeddings/cloudflare_workersai.js +1 -0
  66. package/graphs/neo4j_graph.cjs +1 -0
  67. package/graphs/neo4j_graph.d.ts +1 -0
  68. package/graphs/neo4j_graph.js +1 -0
  69. package/package.json +62 -14
  70. package/vectorstores/cloudflare_vectorize.cjs +1 -0
  71. package/vectorstores/cloudflare_vectorize.d.ts +1 -0
  72. package/vectorstores/cloudflare_vectorize.js +1 -0
package/dist/llms/bedrock.d.ts CHANGED
@@ -1,60 +1,34 @@
  import { EventStreamCodec } from "@smithy/eventstream-codec";
- import type { AwsCredentialIdentity, Provider } from "@aws-sdk/types";
+ import { BaseBedrockInput, type CredentialType } from "../util/bedrock.js";
  import { LLM, BaseLLMParams } from "./base.js";
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { GenerationChunk } from "../schema/index.js";
- type CredentialType = AwsCredentialIdentity | Provider<AwsCredentialIdentity>;
- /** Bedrock models.
- To authenticate, the AWS client uses the following methods to automatically load credentials:
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
- If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used.
- Make sure the credentials / roles used have the required policies to access the Bedrock service.
- */
- export interface BedrockInput {
- /** Model to use.
- For example, "amazon.titan-tg1-large", this is equivalent to the modelId property in the list-foundation-models api.
- */
- model: string;
- /** The AWS region e.g. `us-west-2`.
- Fallback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here.
- */
- region?: string;
- /** AWS Credentials.
- If no credentials are provided, the default credentials from `@aws-sdk/credential-provider-node` will be used.
- */
- credentials?: CredentialType;
- /** Temperature */
- temperature?: number;
- /** Max tokens */
- maxTokens?: number;
- /** A custom fetch function for low-level access to AWS API. Defaults to fetch() */
- fetchFn?: typeof fetch;
- /** Override the default endpoint url */
- endpointUrl?: string;
- }
  /**
  * A type of Large Language Model (LLM) that interacts with the Bedrock
  * service. It extends the base `LLM` class and implements the
- * `BedrockInput` interface. The class is designed to authenticate and
+ * `BaseBedrockInput` interface. The class is designed to authenticate and
  * interact with the Bedrock service, which is a part of Amazon Web
  * Services (AWS). It uses AWS credentials for authentication and can be
  * configured with various parameters such as the model to use, the AWS
  * region, and the maximum number of tokens to generate.
  */
- export declare class Bedrock extends LLM implements BedrockInput {
+ export declare class Bedrock extends LLM implements BaseBedrockInput {
  model: string;
  region: string;
  credentials: CredentialType;
  temperature?: number | undefined;
  maxTokens?: number | undefined;
  fetchFn: typeof fetch;
- endpointUrl?: string;
+ endpointHost?: string;
+ stopSequences?: string[];
+ modelKwargs?: Record<string, unknown>;
  codec: EventStreamCodec;
  get lc_secrets(): {
  [key: string]: string;
  } | undefined;
  _llmType(): string;
- constructor(fields?: Partial<BedrockInput> & BaseLLMParams);
+ static lc_name(): string;
+ constructor(fields?: Partial<BaseBedrockInput> & BaseLLMParams);
  /** Call out to Bedrock service model.
  Arguments:
  prompt: The prompt to pass into the model.
@@ -71,4 +45,3 @@ export declare class Bedrock extends LLM implements BedrockInput {
  [Symbol.asyncIterator](): AsyncGenerator<any, void, unknown>;
  };
  }
- export {};
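Taken together, the bedrock.d.ts changes move the input interface into `util/bedrock.js` (as `BaseBedrockInput`) and surface `endpointHost`, `stopSequences`, and `modelKwargs` as constructor options. A minimal usage sketch of the updated class; the model id, region, and option values below are illustrative placeholders, not taken from this diff:

```ts
import { Bedrock } from "langchain/llms/bedrock";

// All field values here are hypothetical examples.
const llm = new Bedrock({
  model: "anthropic.claude-v2", // the "anthropic." prefix selects provider-specific formatting
  region: "us-west-2",
  temperature: 0,
  maxTokens: 256,
  stopSequences: ["\n\nHuman:"], // new in this release; forwarded for anthropic/ai21 only
  modelKwargs: { top_k: 250 },   // new in this release; spread into the request body as-is
});

const completion = await llm.call("Say hello in one sentence.");
console.log(completion);
```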
package/dist/llms/bedrock.js CHANGED
@@ -1,64 +1,17 @@
- import { SignatureV4 } from "@aws-sdk/signature-v4";
+ import { SignatureV4 } from "@smithy/signature-v4";
  import { defaultProvider } from "@aws-sdk/credential-provider-node";
- import { HttpRequest } from "@aws-sdk/protocol-http";
+ import { HttpRequest } from "@smithy/protocol-http";
  import { EventStreamCodec } from "@smithy/eventstream-codec";
  import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
  import { Sha256 } from "@aws-crypto/sha256-js";
+ import { BedrockLLMInputOutputAdapter, } from "../util/bedrock.js";
  import { getEnvironmentVariable } from "../util/env.js";
  import { LLM } from "./base.js";
  import { GenerationChunk } from "../schema/index.js";
- /**
- * A helper class used within the `Bedrock` class. It is responsible for
- * preparing the input and output for the Bedrock service. It formats the
- * input prompt based on the provider (e.g., "anthropic", "ai21",
- * "amazon") and extracts the generated text from the service response.
- */
- class BedrockLLMInputOutputAdapter {
- /** Adapter class to prepare the inputs from Langchain to a format
- that LLM model expects. Also, provides a helper function to extract
- the generated text from the model response. */
- static prepareInput(provider, prompt, maxTokens = 50, temperature = 0) {
- const inputBody = {};
- if (provider === "anthropic") {
- inputBody.prompt = prompt;
- inputBody.max_tokens_to_sample = maxTokens;
- inputBody.temperature = temperature;
- }
- else if (provider === "ai21") {
- inputBody.prompt = prompt;
- inputBody.maxTokens = maxTokens;
- inputBody.temperature = temperature;
- }
- else if (provider === "amazon") {
- inputBody.inputText = prompt;
- inputBody.textGenerationConfig = {
- maxTokenCount: maxTokens,
- temperature,
- };
- }
- return inputBody;
- }
- /**
- * Extracts the generated text from the service response.
- * @param provider The provider name.
- * @param responseBody The response body from the service.
- * @returns The generated text.
- */
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- static prepareOutput(provider, responseBody) {
- if (provider === "anthropic") {
- return responseBody.completion;
- }
- else if (provider === "ai21") {
- return responseBody.data.text;
- }
- return responseBody.outputText;
- }
- }
  /**
  * A type of Large Language Model (LLM) that interacts with the Bedrock
  * service. It extends the base `LLM` class and implements the
- * `BedrockInput` interface. The class is designed to authenticate and
+ * `BaseBedrockInput` interface. The class is designed to authenticate and
  * interact with the Bedrock service, which is a part of Amazon Web
  * Services (AWS). It uses AWS credentials for authentication and can be
  * configured with various parameters such as the model to use, the AWS
@@ -71,6 +24,9 @@ export class Bedrock extends LLM {
  _llmType() {
  return "bedrock";
  }
+ static lc_name() {
+ return "Bedrock";
+ }
  constructor(fields) {
  super(fields ?? {});
  Object.defineProperty(this, "model", {
@@ -109,7 +65,19 @@
  writable: true,
  value: void 0
  });
- Object.defineProperty(this, "endpointUrl", {
+ Object.defineProperty(this, "endpointHost", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "stopSequences", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "modelKwargs", {
  enumerable: true,
  configurable: true,
  writable: true,
@@ -135,7 +103,9 @@
  this.temperature = fields?.temperature ?? this.temperature;
  this.maxTokens = fields?.maxTokens ?? this.maxTokens;
  this.fetchFn = fields?.fetchFn ?? fetch;
- this.endpointUrl = fields?.endpointUrl;
+ this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
+ this.stopSequences = fields?.stopSequences;
+ this.modelKwargs = fields?.modelKwargs;
  }
  /** Call out to Bedrock service model.
  Arguments:
@@ -156,10 +126,11 @@
  }
  async *_streamResponseChunks(prompt, options, runManager) {
  const provider = this.model.split(".")[0];
- const service = "bedrock";
- const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature);
- const endpointUrl = this.endpointUrl ?? `${service}.${this.region}.amazonaws.com`;
- const url = new URL(`https://${endpointUrl}/model/${this.model}/invoke-with-response-stream`);
+ const service = "bedrock-runtime";
+ const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature, this.stopSequences, this.modelKwargs);
+ const endpointHost = this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
+ const amazonMethod = provider === "anthropic" ? "invoke-with-response-stream" : "invoke";
+ const url = new URL(`https://${endpointHost}/model/${this.model}/${amazonMethod}`);
  const request = new HttpRequest({
  hostname: url.hostname,
  path: url.pathname,
@@ -171,12 +142,12 @@
  // host is required by AWS Signature V4: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
  host: url.host,
  accept: "application/json",
- "Content-Type": "application/json",
+ "content-type": "application/json",
  },
  });
  const signer = new SignatureV4({
  credentials: this.credentials,
- service,
+ service: "bedrock",
  region: this.region,
  sha256: Sha256,
  });
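Two details in the hunks above are easy to miss: requests now target the `bedrock-runtime` host while SigV4 signing keeps the service name `"bedrock"`, and non-anthropic providers use the plain `invoke` method rather than `invoke-with-response-stream`. A condensed sketch of that request construction and signing, using the same `@smithy`/`@aws-crypto` packages the diff imports (model and region are placeholders):

```ts
import { SignatureV4 } from "@smithy/signature-v4";
import { HttpRequest } from "@smithy/protocol-http";
import { Sha256 } from "@aws-crypto/sha256-js";
import { defaultProvider } from "@aws-sdk/credential-provider-node";

const region = "us-west-2";          // placeholder
const model = "anthropic.claude-v2"; // placeholder
const provider = model.split(".")[0];
const method = provider === "anthropic" ? "invoke-with-response-stream" : "invoke";
const url = new URL(`https://bedrock-runtime.${region}.amazonaws.com/model/${model}/${method}`);

const request = new HttpRequest({
  hostname: url.hostname,
  path: url.pathname,
  method: "POST",
  body: JSON.stringify({ prompt: "..." }),
  headers: {
    host: url.host, // required for the SigV4 canonical request
    accept: "application/json",
    "content-type": "application/json",
  },
});

const signer = new SignatureV4({
  credentials: defaultProvider(),
  service: "bedrock", // signing name differs from the "bedrock-runtime" hostname
  region,
  sha256: Sha256,
});
const signedRequest = await signer.sign(request);
```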
@@ -190,15 +161,34 @@
  if (response.status < 200 || response.status >= 300) {
  throw Error(`Failed to access underlying url '${url}': got ${response.status} ${response.statusText}: ${await response.text()}`);
  }
- const reader = response.body?.getReader();
- for await (const chunk of this._readChunks(reader)) {
- const event = this.codec.decode(chunk);
- if (event.headers[":event-type"].value !== "chunk" ||
- event.headers[":content-type"].value !== "application/json") {
- throw Error(`Failed to get event chunk: got ${chunk}`);
+ if (provider === "anthropic") {
+ const reader = response.body?.getReader();
+ const decoder = new TextDecoder();
+ for await (const chunk of this._readChunks(reader)) {
+ const event = this.codec.decode(chunk);
+ if ((event.headers[":event-type"] !== undefined &&
+ event.headers[":event-type"].value !== "chunk") ||
+ event.headers[":content-type"].value !== "application/json") {
+ throw Error(`Failed to get event chunk: got ${chunk}`);
+ }
+ const body = JSON.parse(decoder.decode(event.body));
+ if (body.message) {
+ throw new Error(body.message);
+ }
+ if (body.bytes !== undefined) {
+ const chunkResult = JSON.parse(Buffer.from(body.bytes, "base64").toString());
+ const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, chunkResult);
+ yield new GenerationChunk({
+ text,
+ generationInfo: {},
+ });
+ await runManager?.handleLLMNewToken(text);
+ }
  }
- const body = JSON.parse(Buffer.from(JSON.parse(new TextDecoder("utf-8").decode(event.body)).bytes, "base64").toString());
- const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, body);
+ }
+ else {
+ const json = await response.json();
+ const text = BedrockLLMInputOutputAdapter.prepareOutput(provider, json);
  yield new GenerationChunk({
  text,
  generationInfo: {},
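In the anthropic streaming branch above, each AWS event-stream frame is decoded twice: the frame body is JSON whose `bytes` field is a base64-encoded, provider-specific JSON chunk. A stripped-down sketch of that two-stage decode with the same `EventStreamCodec` the class already holds:

```ts
import { EventStreamCodec } from "@smithy/eventstream-codec";
import { fromUtf8, toUtf8 } from "@smithy/util-utf8";

const codec = new EventStreamCodec(toUtf8, fromUtf8);

// `frame` would come from reading `response.body` chunk by chunk.
function decodeBedrockFrame(frame: Uint8Array): Record<string, unknown> {
  const event = codec.decode(frame);
  // Outer envelope: { bytes: "<base64>" } on success, { message } on error.
  const envelope = JSON.parse(new TextDecoder().decode(event.body));
  if (envelope.message) {
    throw new Error(envelope.message);
  }
  // Inner payload: the chunk handed to BedrockLLMInputOutputAdapter.prepareOutput.
  return JSON.parse(Buffer.from(envelope.bytes, "base64").toString());
}
```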
package/dist/llms/openai-chat.d.ts CHANGED
@@ -3,7 +3,7 @@ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { GenerationChunk, LLMResult } from "../schema/index.js";
  import { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput, OpenAICoreRequestOptions, LegacyOpenAIInput } from "../types/openai-types.js";
  import { BaseLLMParams, LLM } from "./base.js";
- export { AzureOpenAIInput, OpenAIChatInput };
+ export { type AzureOpenAIInput, type OpenAIChatInput };
  /**
  * Interface that extends the OpenAICallOptions interface and includes an
  * optional promptIndex property. It represents the options that can be
package/dist/llms/openai.d.ts CHANGED
@@ -3,7 +3,7 @@ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { GenerationChunk, LLMResult } from "../schema/index.js";
  import { AzureOpenAIInput, OpenAICallOptions, OpenAICoreRequestOptions, OpenAIInput, LegacyOpenAIInput } from "../types/openai-types.js";
  import { BaseLLM, BaseLLMParams } from "./base.js";
- export { AzureOpenAIInput, OpenAICallOptions, OpenAIInput };
+ export type { AzureOpenAIInput, OpenAICallOptions, OpenAIInput };
  /**
  * Wrapper around OpenAI large language models.
  *
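The recurring `export { type X }` / `export type { ... }` rewrites in this release are not cosmetic: under TypeScript's `isolatedModules` (and single-file transpilers such as esbuild or swc), a re-export of a type-only name must be explicitly marked so it can be erased without resolving the source module. A generic illustration, not langchain-specific:

```ts
// A file-at-a-time transpiler cannot tell whether these names are types or
// values, so it would keep runtime re-exports that resolve to nothing:
//   export { AzureOpenAIInput, OpenAICallOptions, OpenAIInput };
// Marked type-only, they are dropped cleanly at compile time:
export type { AzureOpenAIInput, OpenAICallOptions, OpenAIInput } from "../types/openai-types.js";
```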
package/dist/load/import_constants.cjs CHANGED
@@ -15,6 +15,8 @@ exports.optionalImportEntrypoints = [
  "langchain/chains/query_constructor",
  "langchain/chains/query_constructor/ir",
  "langchain/chains/sql_db",
+ "langchain/chains/graph_qa/cypher",
+ "langchain/embeddings/cloudflare_workersai",
  "langchain/embeddings/cohere",
  "langchain/embeddings/tensorflow",
  "langchain/embeddings/hf",
@@ -36,6 +38,7 @@
  "langchain/prompts/load",
  "langchain/vectorstores/analyticdb",
  "langchain/vectorstores/elasticsearch",
+ "langchain/vectorstores/cloudflare_vectorize",
  "langchain/vectorstores/chroma",
  "langchain/vectorstores/googlevertexai",
  "langchain/vectorstores/hnswlib",
@@ -96,6 +99,7 @@
  "langchain/document_loaders/fs/openai_whisper_audio",
  "langchain/document_transformers/html_to_text",
  "langchain/document_transformers/mozilla_readability",
+ "langchain/chat_models/bedrock",
  "langchain/chat_models/googlevertexai",
  "langchain/chat_models/googlevertexai/web",
  "langchain/chat_models/googlepalm",
@@ -130,6 +134,7 @@
  "langchain/stores/message/planetscale",
  "langchain/stores/message/xata",
  "langchain/storage/ioredis",
+ "langchain/graphs/neo4j_graph",
  "langchain/hub",
  "langchain/experimental/multimodal_embeddings/googlevertexai",
  "langchain/experimental/chat_models/anthropic_functions",
package/dist/load/import_constants.js CHANGED
@@ -12,6 +12,8 @@ export const optionalImportEntrypoints = [
  "langchain/chains/query_constructor",
  "langchain/chains/query_constructor/ir",
  "langchain/chains/sql_db",
+ "langchain/chains/graph_qa/cypher",
+ "langchain/embeddings/cloudflare_workersai",
  "langchain/embeddings/cohere",
  "langchain/embeddings/tensorflow",
  "langchain/embeddings/hf",
@@ -33,6 +35,7 @@
  "langchain/prompts/load",
  "langchain/vectorstores/analyticdb",
  "langchain/vectorstores/elasticsearch",
+ "langchain/vectorstores/cloudflare_vectorize",
  "langchain/vectorstores/chroma",
  "langchain/vectorstores/googlevertexai",
  "langchain/vectorstores/hnswlib",
@@ -93,6 +96,7 @@
  "langchain/document_loaders/fs/openai_whisper_audio",
  "langchain/document_transformers/html_to_text",
  "langchain/document_transformers/mozilla_readability",
+ "langchain/chat_models/bedrock",
  "langchain/chat_models/googlevertexai",
  "langchain/chat_models/googlevertexai/web",
  "langchain/chat_models/googlepalm",
@@ -127,6 +131,7 @@
  "langchain/stores/message/planetscale",
  "langchain/stores/message/xata",
  "langchain/storage/ioredis",
+ "langchain/graphs/neo4j_graph",
  "langchain/hub",
  "langchain/experimental/multimodal_embeddings/googlevertexai",
  "langchain/experimental/chat_models/anthropic_functions",
package/dist/memory/index.d.ts CHANGED
@@ -1,12 +1,12 @@
- export { BufferMemory, BufferMemoryInput } from "./buffer_memory.js";
+ export { BufferMemory, type BufferMemoryInput } from "./buffer_memory.js";
  export { BaseMemory, getInputValue, getBufferString } from "./base.js";
- export { ConversationSummaryMemory, ConversationSummaryMemoryInput, BaseConversationSummaryMemory, BaseConversationSummaryMemoryInput, } from "./summary.js";
- export { BufferWindowMemory, BufferWindowMemoryInput, } from "./buffer_window_memory.js";
- export { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js";
+ export { ConversationSummaryMemory, type ConversationSummaryMemoryInput, BaseConversationSummaryMemory, type BaseConversationSummaryMemoryInput, } from "./summary.js";
+ export { BufferWindowMemory, type BufferWindowMemoryInput, } from "./buffer_window_memory.js";
+ export { BaseChatMemory, type BaseChatMemoryInput } from "./chat_memory.js";
  export { ChatMessageHistory } from "../stores/message/in_memory.js";
- export { MotorheadMemory, MotorheadMemoryInput } from "./motorhead_memory.js";
- export { VectorStoreRetrieverMemory, VectorStoreRetrieverMemoryParams, } from "./vector_store.js";
+ export { MotorheadMemory, type MotorheadMemoryInput, } from "./motorhead_memory.js";
+ export { VectorStoreRetrieverMemory, type VectorStoreRetrieverMemoryParams, } from "./vector_store.js";
  export { EntityMemory } from "./entity_memory.js";
  export { ENTITY_MEMORY_CONVERSATION_TEMPLATE } from "./prompt.js";
- export { CombinedMemoryInput, CombinedMemory } from "./combined_memory.js";
- export { ConversationSummaryBufferMemory, ConversationSummaryBufferMemoryInput, } from "./summary_buffer.js";
+ export { type CombinedMemoryInput, CombinedMemory } from "./combined_memory.js";
+ export { ConversationSummaryBufferMemory, type ConversationSummaryBufferMemoryInput, } from "./summary_buffer.js";
package/dist/memory/index.js CHANGED
@@ -4,7 +4,7 @@ export { ConversationSummaryMemory, BaseConversationSummaryMemory, } from "./sum
  export { BufferWindowMemory, } from "./buffer_window_memory.js";
  export { BaseChatMemory } from "./chat_memory.js";
  export { ChatMessageHistory } from "../stores/message/in_memory.js";
- export { MotorheadMemory } from "./motorhead_memory.js";
+ export { MotorheadMemory, } from "./motorhead_memory.js";
  export { VectorStoreRetrieverMemory, } from "./vector_store.js";
  export { EntityMemory } from "./entity_memory.js";
  export { ENTITY_MEMORY_CONVERSATION_TEMPLATE } from "./prompt.js";
package/dist/output_parsers/index.d.ts CHANGED
@@ -1,8 +1,8 @@
  export { ListOutputParser, CommaSeparatedListOutputParser } from "./list.js";
  export { RegexParser } from "./regex.js";
- export { StructuredOutputParser, AsymmetricStructuredOutputParser, JsonMarkdownStructuredOutputParser, JsonMarkdownFormatInstructionsOptions, JsonMarkdownStructuredOutputParserInput, } from "./structured.js";
+ export { StructuredOutputParser, AsymmetricStructuredOutputParser, JsonMarkdownStructuredOutputParser, type JsonMarkdownFormatInstructionsOptions, type JsonMarkdownStructuredOutputParserInput, } from "./structured.js";
  export { OutputFixingParser } from "./fix.js";
  export { CombiningOutputParser } from "./combining.js";
- export { RouterOutputParser, RouterOutputParserInput } from "./router.js";
+ export { RouterOutputParser, type RouterOutputParserInput } from "./router.js";
  export { CustomListOutputParser } from "./list.js";
- export { FunctionParameters, OutputFunctionsParser, JsonOutputFunctionsParser, JsonKeyOutputFunctionsParser, } from "../output_parsers/openai_functions.js";
+ export { type FunctionParameters, OutputFunctionsParser, JsonOutputFunctionsParser, JsonKeyOutputFunctionsParser, } from "../output_parsers/openai_functions.js";
package/dist/prompts/index.d.ts CHANGED
@@ -1,10 +1,10 @@
- export { BaseExampleSelector, BasePromptTemplate, BasePromptTemplateInput, StringPromptValue, BaseStringPromptTemplate, } from "./base.js";
- export { PromptTemplate, PromptTemplateInput } from "./prompt.js";
+ export { BaseExampleSelector, BasePromptTemplate, type BasePromptTemplateInput, StringPromptValue, BaseStringPromptTemplate, } from "./base.js";
+ export { PromptTemplate, type PromptTemplateInput } from "./prompt.js";
  export { BasePromptSelector, ConditionalPromptSelector, isChatModel, isLLM, } from "./selectors/conditional.js";
- export { LengthBasedExampleSelector, LengthBasedExampleSelectorInput, } from "./selectors/LengthBasedExampleSelector.js";
- export { SemanticSimilarityExampleSelector, SemanticSimilarityExampleSelectorInput, } from "./selectors/SemanticSimilarityExampleSelector.js";
- export { FewShotPromptTemplate, FewShotPromptTemplateInput, } from "./few_shot.js";
+ export { LengthBasedExampleSelector, type LengthBasedExampleSelectorInput, } from "./selectors/LengthBasedExampleSelector.js";
+ export { SemanticSimilarityExampleSelector, type SemanticSimilarityExampleSelectorInput, } from "./selectors/SemanticSimilarityExampleSelector.js";
+ export { FewShotPromptTemplate, type FewShotPromptTemplateInput, } from "./few_shot.js";
  export { ChatPromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate, SystemMessagePromptTemplate, ChatMessagePromptTemplate, MessagesPlaceholder, BaseChatPromptTemplate, } from "./chat.js";
- export { SerializedPromptTemplate, SerializedBasePromptTemplate, SerializedFewShotTemplate, } from "./serde.js";
- export { parseTemplate, renderTemplate, checkValidTemplate, TemplateFormat, } from "./template.js";
- export { PipelinePromptParams, PipelinePromptTemplate, PipelinePromptTemplateInput, } from "./pipeline.js";
+ export { type SerializedPromptTemplate, type SerializedBasePromptTemplate, type SerializedFewShotTemplate, } from "./serde.js";
+ export { parseTemplate, renderTemplate, checkValidTemplate, type TemplateFormat, } from "./template.js";
+ export { type PipelinePromptParams, PipelinePromptTemplate, type PipelinePromptTemplateInput, } from "./pipeline.js";
package/dist/retrievers/remote/index.d.ts CHANGED
@@ -1,3 +1,3 @@
- export { RemoteRetriever, RemoteRetrieverParams, RemoteRetrieverAuth, RemoteRetrieverValues, } from "./base.js";
- export { ChatGPTPluginRetriever, ChatGPTPluginRetrieverFilter, ChatGPTPluginRetrieverParams, } from "./chatgpt-plugin.js";
- export { RemoteLangChainRetriever, RemoteLangChainRetrieverParams, } from "./remote-retriever.js";
+ export { RemoteRetriever, type RemoteRetrieverParams, type RemoteRetrieverAuth, type RemoteRetrieverValues, } from "./base.js";
+ export { ChatGPTPluginRetriever, type ChatGPTPluginRetrieverFilter, type ChatGPTPluginRetrieverParams, } from "./chatgpt-plugin.js";
+ export { RemoteLangChainRetriever, type RemoteLangChainRetrieverParams, } from "./remote-retriever.js";
package/dist/schema/runnable/index.d.ts CHANGED
@@ -1,5 +1,5 @@
- export { RunnableFunc, RunnableLike, RunnableBatchOptions, RunnableRetryFailedAttemptHandler, Runnable, RunnableBindingArgs, RunnableBinding, RunnableEach, RunnableRetry, RunnableSequence, RunnableMap, RunnableLambda, RunnableWithFallbacks, } from "./base.js";
- export { RunnableConfig } from "./config.js";
+ export { type RunnableFunc, type RunnableLike, type RunnableBatchOptions, type RunnableRetryFailedAttemptHandler, Runnable, type RunnableBindingArgs, RunnableBinding, RunnableEach, RunnableRetry, RunnableSequence, RunnableMap, RunnableLambda, RunnableWithFallbacks, } from "./base.js";
+ export type { RunnableConfig } from "./config.js";
  export { RunnablePassthrough } from "./passthrough.js";
  export { RouterRunnable } from "./router.js";
- export { RunnableBranch, Branch, BranchLike } from "./branch.js";
+ export { RunnableBranch, type Branch, type BranchLike } from "./branch.js";
package/dist/sql_db.d.ts CHANGED
@@ -1,7 +1,7 @@
  import type { DataSource as DataSourceT, DataSourceOptions } from "typeorm";
  import { SerializedSqlDatabase, SqlDatabaseDataSourceParams, SqlDatabaseOptionsParams, SqlTable } from "./util/sql_utils.js";
  import { Serializable } from "./load/serializable.js";
- export { SqlDatabaseDataSourceParams, SqlDatabaseOptionsParams };
+ export type { SqlDatabaseDataSourceParams, SqlDatabaseOptionsParams };
  export declare class SqlDatabase extends Serializable implements SqlDatabaseOptionsParams, SqlDatabaseDataSourceParams {
  lc_namespace: string[];
  toJSON(): import("./load/serializable.js").SerializedNotImplemented;
package/dist/tools/index.d.ts CHANGED
@@ -1,21 +1,21 @@
- export { SerpAPI, SerpAPIParameters } from "./serpapi.js";
+ export { SerpAPI, type SerpAPIParameters } from "./serpapi.js";
  export { DadJokeAPI } from "./dadjokeapi.js";
  export { BingSerpAPI } from "./bingserpapi.js";
- export { Tool, ToolParams, StructuredTool } from "./base.js";
- export { DynamicTool, DynamicToolInput, DynamicStructuredTool, DynamicStructuredToolInput, } from "./dynamic.js";
+ export { Tool, type ToolParams, StructuredTool } from "./base.js";
+ export { DynamicTool, type DynamicToolInput, DynamicStructuredTool, type DynamicStructuredToolInput, } from "./dynamic.js";
  export { IFTTTWebhook } from "./IFTTTWebhook.js";
- export { ChainTool, ChainToolInput } from "./chain.js";
- export { JsonSpec, JsonListKeysTool, JsonGetValueTool, JsonObject, Json, } from "./json.js";
+ export { ChainTool, type ChainToolInput } from "./chain.js";
+ export { JsonSpec, JsonListKeysTool, JsonGetValueTool, type JsonObject, type Json, } from "./json.js";
  export { RequestsGetTool, RequestsPostTool } from "./requests.js";
  export { VectorStoreQATool } from "./vectorstore.js";
- export { ZapierNLARunAction, ZapierNLAWrapper, ZapierNLAWrapperParams, } from "./zapier.js";
- export { Serper, SerperParameters } from "./serper.js";
- export { GoogleCustomSearch, GoogleCustomSearchParams, } from "./google_custom_search.js";
+ export { ZapierNLARunAction, ZapierNLAWrapper, type ZapierNLAWrapperParams, } from "./zapier.js";
+ export { Serper, type SerperParameters } from "./serper.js";
+ export { GoogleCustomSearch, type GoogleCustomSearchParams, } from "./google_custom_search.js";
  export { AIPluginTool } from "./aiplugin.js";
  export { ReadFileTool, WriteFileTool } from "./fs.js";
- export { BraveSearch, BraveSearchParams } from "./brave_search.js";
- export { WikipediaQueryRun, WikipediaQueryRunParams, } from "./wikipedia_query_run.js";
+ export { BraveSearch, type BraveSearchParams } from "./brave_search.js";
+ export { WikipediaQueryRun, type WikipediaQueryRunParams, } from "./wikipedia_query_run.js";
  export { WolframAlphaTool } from "./wolframalpha.js";
- export { DataForSeoAPISearch, DataForSeoApiConfig, } from "./dataforseo_api_search.js";
+ export { DataForSeoAPISearch, type DataForSeoApiConfig, } from "./dataforseo_api_search.js";
  export { SearxngSearch } from "./searxng_search.js";
- export { SearchApi, SearchApiParameters } from "./searchapi.js";
+ export { SearchApi, type SearchApiParameters } from "./searchapi.js";
package/dist/util/bedrock.cjs ADDED
@@ -0,0 +1,54 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.BedrockLLMInputOutputAdapter = void 0;
+ /**
+ * A helper class used within the `Bedrock` class. It is responsible for
+ * preparing the input and output for the Bedrock service. It formats the
+ * input prompt based on the provider (e.g., "anthropic", "ai21",
+ * "amazon") and extracts the generated text from the service response.
+ */
+ class BedrockLLMInputOutputAdapter {
+ /** Adapter class to prepare the inputs from Langchain to a format
+ that LLM model expects. Also, provides a helper function to extract
+ the generated text from the model response. */
+ static prepareInput(provider, prompt, maxTokens = 50, temperature = 0, stopSequences = undefined, modelKwargs = {}) {
+ const inputBody = {};
+ if (provider === "anthropic") {
+ inputBody.prompt = prompt;
+ inputBody.max_tokens_to_sample = maxTokens;
+ inputBody.temperature = temperature;
+ inputBody.stop_sequences = stopSequences;
+ }
+ else if (provider === "ai21") {
+ inputBody.prompt = prompt;
+ inputBody.maxTokens = maxTokens;
+ inputBody.temperature = temperature;
+ inputBody.stopSequences = stopSequences;
+ }
+ else if (provider === "amazon") {
+ inputBody.inputText = prompt;
+ inputBody.textGenerationConfig = {
+ maxTokenCount: maxTokens,
+ temperature,
+ };
+ }
+ return { ...inputBody, ...modelKwargs };
+ }
+ /**
+ * Extracts the generated text from the service response.
+ * @param provider The provider name.
+ * @param responseBody The response body from the service.
+ * @returns The generated text.
+ */
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ static prepareOutput(provider, responseBody) {
+ if (provider === "anthropic") {
+ return responseBody.completion;
+ }
+ else if (provider === "ai21") {
+ return responseBody?.completions?.[0]?.data?.text ?? "";
+ }
+ return responseBody.outputText;
+ }
+ }
+ exports.BedrockLLMInputOutputAdapter = BedrockLLMInputOutputAdapter;
package/dist/util/bedrock.d.ts ADDED
@@ -0,0 +1,59 @@
+ import type { AwsCredentialIdentity, Provider } from "@aws-sdk/types";
+ export type CredentialType = AwsCredentialIdentity | Provider<AwsCredentialIdentity>;
+ /** Bedrock models.
+ To authenticate, the AWS client uses the following methods to automatically load credentials:
+ https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
+ If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used.
+ Make sure the credentials / roles used have the required policies to access the Bedrock service.
+ */
+ export interface BaseBedrockInput {
+ /** Model to use.
+ For example, "amazon.titan-tg1-large", this is equivalent to the modelId property in the list-foundation-models api.
+ */
+ model: string;
+ /** The AWS region e.g. `us-west-2`.
+ Fallback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here.
+ */
+ region?: string;
+ /** AWS Credentials.
+ If no credentials are provided, the default credentials from `@aws-sdk/credential-provider-node` will be used.
+ */
+ credentials?: CredentialType;
+ /** Temperature. */
+ temperature?: number;
+ /** Max tokens. */
+ maxTokens?: number;
+ /** A custom fetch function for low-level access to AWS API. Defaults to fetch(). */
+ fetchFn?: typeof fetch;
+ /** @deprecated Use endpointHost instead Override the default endpoint url. */
+ endpointUrl?: string;
+ /** Override the default endpoint hostname. */
+ endpointHost?: string;
+ /** Optional additional stop sequences to pass to the model. Currently only supported for Anthropic and AI21. */
+ stopSequences?: string[];
+ /** Additional kwargs to pass to the model. */
+ modelKwargs?: Record<string, unknown>;
+ }
+ type Dict = {
+ [key: string]: unknown;
+ };
+ /**
+ * A helper class used within the `Bedrock` class. It is responsible for
+ * preparing the input and output for the Bedrock service. It formats the
+ * input prompt based on the provider (e.g., "anthropic", "ai21",
+ * "amazon") and extracts the generated text from the service response.
+ */
+ export declare class BedrockLLMInputOutputAdapter {
+ /** Adapter class to prepare the inputs from Langchain to a format
+ that LLM model expects. Also, provides a helper function to extract
+ the generated text from the model response. */
+ static prepareInput(provider: string, prompt: string, maxTokens?: number, temperature?: number, stopSequences?: string[] | undefined, modelKwargs?: Record<string, unknown>): Dict;
+ /**
+ * Extracts the generated text from the service response.
+ * @param provider The provider name.
+ * @param responseBody The response body from the service.
+ * @returns The generated text.
+ */
+ static prepareOutput(provider: string, responseBody: any): string;
+ }
+ export {};
package/dist/util/bedrock.js ADDED
@@ -0,0 +1,50 @@
+ /**
+ * A helper class used within the `Bedrock` class. It is responsible for
+ * preparing the input and output for the Bedrock service. It formats the
+ * input prompt based on the provider (e.g., "anthropic", "ai21",
+ * "amazon") and extracts the generated text from the service response.
+ */
+ export class BedrockLLMInputOutputAdapter {
+ /** Adapter class to prepare the inputs from Langchain to a format
+ that LLM model expects. Also, provides a helper function to extract
+ the generated text from the model response. */
+ static prepareInput(provider, prompt, maxTokens = 50, temperature = 0, stopSequences = undefined, modelKwargs = {}) {
+ const inputBody = {};
+ if (provider === "anthropic") {
+ inputBody.prompt = prompt;
+ inputBody.max_tokens_to_sample = maxTokens;
+ inputBody.temperature = temperature;
+ inputBody.stop_sequences = stopSequences;
+ }
+ else if (provider === "ai21") {
+ inputBody.prompt = prompt;
+ inputBody.maxTokens = maxTokens;
+ inputBody.temperature = temperature;
+ inputBody.stopSequences = stopSequences;
+ }
+ else if (provider === "amazon") {
+ inputBody.inputText = prompt;
+ inputBody.textGenerationConfig = {
+ maxTokenCount: maxTokens,
+ temperature,
+ };
+ }
+ return { ...inputBody, ...modelKwargs };
+ }
+ /**
+ * Extracts the generated text from the service response.
+ * @param provider The provider name.
+ * @param responseBody The response body from the service.
+ * @returns The generated text.
+ */
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ static prepareOutput(provider, responseBody) {
+ if (provider === "anthropic") {
+ return responseBody.completion;
+ }
+ else if (provider === "ai21") {
+ return responseBody?.completions?.[0]?.data?.text ?? "";
+ }
+ return responseBody.outputText;
+ }
+ }
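Because the adapter now lives in its own module, its formatting rules can be exercised directly. A small sketch using only the methods defined above (the import path refers to the internal `dist` module and is shown purely for illustration):

```ts
import { BedrockLLMInputOutputAdapter } from "./util/bedrock.js"; // internal path, as used inside the package

// Anthropic request body: snake_case keys, stop sequences forwarded,
// modelKwargs spread over the result last.
const body = BedrockLLMInputOutputAdapter.prepareInput(
  "anthropic",
  "Human: hello\n\nAssistant:",
  256,            // maxTokens -> max_tokens_to_sample
  0.5,            // temperature
  ["\n\nHuman:"], // stopSequences -> stop_sequences
  { top_k: 250 }  // modelKwargs
);
// body === { prompt: "...", max_tokens_to_sample: 256, temperature: 0.5,
//            stop_sequences: ["\n\nHuman:"], top_k: 250 }

// Matching output extraction for each provider family:
BedrockLLMInputOutputAdapter.prepareOutput("anthropic", { completion: "Hi!" }); // "Hi!"
BedrockLLMInputOutputAdapter.prepareOutput("ai21", {
  completions: [{ data: { text: "Hi!" } }],
}); // "Hi!"
```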