langchain 0.1.24 → 0.1.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/experimental/chat_models/anthropic_functions.cjs +1 -0
- package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -0
- package/dist/experimental/chat_models/anthropic_functions.js +1 -0
- package/dist/llms/bedrock/index.cjs +1 -0
- package/dist/llms/bedrock/index.d.ts +1 -2
- package/dist/llms/bedrock/index.js +1 -0
- package/dist/util/sql_utils.cjs +2 -1
- package/dist/util/sql_utils.js +2 -1
- package/package.json +7 -7
- package/dist/util/bedrock.cjs +0 -75
- package/dist/util/bedrock.d.ts +0 -64
- package/dist/util/bedrock.js +0 -71
package/README.md
CHANGED
@@ -2,7 +2,7 @@

⚡ Building applications with LLMs through composability ⚡

-[CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) [License: MIT](https://opensource.org/licenses/MIT) [Twitter](https://twitter.com/langchainai) [Discord](https://discord.gg/6adMQxSpJS) [Open in Dev Containers](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs)
+(badge row updated; the replacement line's badge image markup is not recoverable from this extract)
[<img src="https://github.com/codespaces/badge.svg" title="Open in Github Codespace" width="150" height="20">](https://codespaces.new/langchain-ai/langchainjs)

Looking for the Python version? Check out [LangChain](https://github.com/langchain-ai/langchain).
package/dist/experimental/chat_models/anthropic_functions.cjs
CHANGED
@@ -28,6 +28,7 @@ for the weather in SF you would respond:

<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>`);
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
class AnthropicFunctions extends chat_models_1.BaseChatModel {
    static lc_name() {
        return "AnthropicFunctions";
package/dist/experimental/chat_models/anthropic_functions.d.ts
CHANGED
@@ -6,13 +6,16 @@ import { BaseFunctionCallOptions } from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { BasePromptTemplate } from "@langchain/core/prompts";
import { type AnthropicInput } from "../../chat_models/anthropic.js";
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
export interface ChatAnthropicFunctionsCallOptions extends BaseFunctionCallOptions {
    tools?: StructuredToolInterface[];
}
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
export type AnthropicFunctionsInput = Partial<AnthropicInput> & BaseChatModelParams & {
    llm?: BaseChatModel;
    systemPromptTemplate?: BasePromptTemplate;
};
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
export declare class AnthropicFunctions extends BaseChatModel<ChatAnthropicFunctionsCallOptions> {
    llm: BaseChatModel;
    stopSequences?: string[];
package/dist/experimental/chat_models/anthropic_functions.js
CHANGED
@@ -25,6 +25,7 @@ for the weather in SF you would respond:

<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>`);
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
export class AnthropicFunctions extends BaseChatModel {
    static lc_name() {
        return "AnthropicFunctions";
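All three compiled outputs (.cjs, .d.ts, .js) gain the same `@deprecated` JSDoc notice. A minimal migration sketch follows, assuming the `@langchain/anthropic` package's experimental entrypoint exports the class under the same name — the diff shows only the notice, not the target package's API:

```typescript
// Before: the class shipped inside the monolithic `langchain` package.
// import { AnthropicFunctions } from "langchain/experimental/chat_models/anthropic_functions";

// After (per the new @deprecated notice; export name assumed to match):
import { AnthropicFunctions } from "@langchain/anthropic/experimental";

// Illustrative configuration; reads ANTHROPIC_API_KEY from the environment.
const model = new AnthropicFunctions({
  modelName: "claude-2", // any Anthropic model name
  temperature: 0.1,
});

const response = await model.invoke("What is the weather in SF?");
console.log(response);
```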
package/dist/llms/bedrock/index.d.ts
CHANGED
@@ -1,7 +1,6 @@
import { BaseLLMParams } from "@langchain/core/language_models/llms";
-import { BaseBedrockInput } from "../../util/bedrock.js";
import { Bedrock as BaseBedrock } from "./web.js";
export declare class Bedrock extends BaseBedrock {
    static lc_name(): string;
-    constructor(fields?: Partial<BaseBedrockInput> & BaseLLMParams);
+    constructor(fields?: Partial<any> & BaseLLMParams);
}
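With `BaseBedrockInput` removed from langchain's own utils (see the deleted `util/bedrock.*` files below), the constructor typing here loosens to `Partial<any>`, and the community package becomes the natural home for Bedrock. A hedged sketch, assuming `@langchain/community`'s `llms/bedrock` entrypoint (bumped to `~0.0.36` in this release):

```typescript
import { Bedrock } from "@langchain/community/llms/bedrock";

// Illustrative values; credentials resolve from the default AWS
// provider chain when not passed explicitly.
const model = new Bedrock({
  model: "anthropic.claude-v2", // modelId from the list-foundation-models API
  region: "us-east-1",
  temperature: 0,
  maxTokens: 256,
});

const text = await model.invoke("Tell me a joke");
console.log(text);
```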
package/dist/util/sql_utils.cjs
CHANGED
@@ -83,7 +83,8 @@ const getTableAndColumnsName = async (appDataSource) => {
        const rep = await appDataSource.query(sql);
        return formatToSqlTable(rep);
    }
-    if (appDataSource.options.type === "mysql") {
+    if (appDataSource.options.type === "mysql" ||
+        appDataSource.options.type === "aurora-mysql") {
        sql =
            "SELECT " +
                "TABLE_NAME AS table_name, " +
package/dist/util/sql_utils.js
CHANGED
@@ -77,7 +77,8 @@ export const getTableAndColumnsName = async (appDataSource) => {
        const rep = await appDataSource.query(sql);
        return formatToSqlTable(rep);
    }
-    if (appDataSource.options.type === "mysql") {
+    if (appDataSource.options.type === "mysql" ||
+        appDataSource.options.type === "aurora-mysql") {
        sql =
            "SELECT " +
                "TABLE_NAME AS table_name, " +
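Both builds of sql_utils get the same one-line widening: TypeORM's `"aurora-mysql"` driver now takes the MySQL `INFORMATION_SCHEMA` introspection path instead of falling through to the generic branch. A sketch of what this enables, with placeholder connection values:

```typescript
import { DataSource } from "typeorm";
import { SqlDatabase } from "langchain/sql_db";

// An Aurora MySQL (Data API) data source; all connection values below
// are placeholders, not from the diff.
const appDataSource = new DataSource({
  type: "aurora-mysql",
  region: "us-east-1",
  secretArn: "arn:aws:secretsmanager:...", // elided
  resourceArn: "arn:aws:rds:...",          // elided
  database: "mydb",
});

// getTableAndColumnsName (invoked via fromDataSourceParams) now issues
// the same INFORMATION_SCHEMA query for "aurora-mysql" as for "mysql".
const db = await SqlDatabase.fromDataSourceParams({ appDataSource });
console.log(db.allTables.map((t) => t.tableName));
```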
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
  "name": "langchain",
-  "version": "0.1.24",
+  "version": "0.1.26",
  "description": "Typescript bindings for langchain",
  "type": "module",
  "engines": {
@@ -1301,7 +1301,7 @@
    "web-auth-library": "^1.0.3",
    "wikipedia": "^2.1.2",
    "youtube-transcript": "^1.0.6",
-    "youtubei.js": "^
+    "youtubei.js": "^9.1.0"
  },
  "peerDependencies": {
    "@aws-sdk/client-s3": "^3.310.0",
@@ -1328,7 +1328,7 @@
    "couchbase": "^4.2.10",
    "d3-dsv": "^2.0.0",
    "epub2": "^3.0.1",
-    "fast-xml-parser": "
+    "fast-xml-parser": "*",
    "google-auth-library": "^8.9.0",
    "handlebars": "^4.7.8",
    "html-to-text": "^9.0.5",
@@ -1353,7 +1353,7 @@
    "web-auth-library": "^1.0.3",
    "ws": "^8.14.2",
    "youtube-transcript": "^1.0.6",
-    "youtubei.js": "^
+    "youtubei.js": "^9.1.0"
  },
  "peerDependenciesMeta": {
    "@aws-sdk/client-s3": {
@@ -1512,9 +1512,9 @@
  },
  "dependencies": {
    "@anthropic-ai/sdk": "^0.9.1",
-    "@langchain/community": "~0.0.
-    "@langchain/core": "~0.1.
-    "@langchain/openai": "~0.0.
+    "@langchain/community": "~0.0.36",
+    "@langchain/core": "~0.1.44",
+    "@langchain/openai": "~0.0.19",
    "binary-extensions": "^2.2.0",
    "expr-eval": "^2.0.2",
    "js-tiktoken": "^1.0.7",
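The three `@langchain/*` workspace dependencies move to newer tilde pins. A tilde range floats only the patch component, which is why langchain re-releases to pick up newer `@langchain/core` versions. A quick check using the `semver` package (illustrative only; not a langchain dependency):

```typescript
import semver from "semver";

console.log(semver.satisfies("0.1.45", "~0.1.44")); // true  (patch bumps allowed)
console.log(semver.satisfies("0.2.0", "~0.1.44"));  // false (minor bumps excluded)
console.log(semver.satisfies("0.0.37", "~0.0.36")); // true  (~0.0.36 means >=0.0.36 <0.1.0)
```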
package/dist/util/bedrock.cjs
DELETED
@@ -1,75 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.BedrockLLMInputOutputAdapter = void 0;
-/**
- * A helper class used within the `Bedrock` class. It is responsible for
- * preparing the input and output for the Bedrock service. It formats the
- * input prompt based on the provider (e.g., "anthropic", "ai21",
- * "amazon") and extracts the generated text from the service response.
- */
-class BedrockLLMInputOutputAdapter {
-    /** Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides a helper function to extract
-    the generated text from the model response. */
-    static prepareInput(provider, prompt, maxTokens = 50, temperature = 0, stopSequences = undefined, modelKwargs = {}, bedrockMethod = "invoke") {
-        const inputBody = {};
-        if (provider === "anthropic") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens_to_sample = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-        }
-        else if (provider === "ai21") {
-            inputBody.prompt = prompt;
-            inputBody.maxTokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stopSequences = stopSequences;
-        }
-        else if (provider === "meta") {
-            inputBody.prompt = prompt;
-            inputBody.max_gen_len = maxTokens;
-            inputBody.temperature = temperature;
-        }
-        else if (provider === "amazon") {
-            inputBody.inputText = prompt;
-            inputBody.textGenerationConfig = {
-                maxTokenCount: maxTokens,
-                temperature,
-            };
-        }
-        else if (provider === "cohere") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-            if (bedrockMethod === "invoke-with-response-stream") {
-                inputBody.stream = true;
-            }
-        }
-        return { ...inputBody, ...modelKwargs };
-    }
-    /**
-     * Extracts the generated text from the service response.
-     * @param provider The provider name.
-     * @param responseBody The response body from the service.
-     * @returns The generated text.
-     */
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    static prepareOutput(provider, responseBody) {
-        if (provider === "anthropic") {
-            return responseBody.completion;
-        }
-        else if (provider === "ai21") {
-            return responseBody?.completions?.[0]?.data?.text ?? "";
-        }
-        else if (provider === "cohere") {
-            return responseBody?.generations?.[0]?.text ?? responseBody?.text ?? "";
-        }
-        else if (provider === "meta") {
-            return responseBody.generation;
-        }
-        // I haven't been able to get a response with more than one result in it.
-        return responseBody.results?.[0]?.outputText;
-    }
-}
-exports.BedrockLLMInputOutputAdapter = BedrockLLMInputOutputAdapter;
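For reference, this is roughly how the deleted adapter behaved: per-provider key renaming on the way in, per-provider text extraction on the way out. The values below are illustrative, not from the diff, and `BedrockLLMInputOutputAdapter` refers to the class defined in the deleted file above:

```typescript
// Shape produced by the deleted prepareInput for the "anthropic"
// provider (text-completions era): LangChain-level options become
// Claude request-body keys.
const body = BedrockLLMInputOutputAdapter.prepareInput(
  "anthropic",
  "\n\nHuman: Tell me a joke\n\nAssistant:",
  256,            // maxTokens      -> max_tokens_to_sample
  0.5,            // temperature    -> temperature
  ["\n\nHuman:"]  // stopSequences  -> stop_sequences
);
// => { prompt: "\n\nHuman: ...", max_tokens_to_sample: 256,
//      temperature: 0.5, stop_sequences: ["\n\nHuman:"] }

// prepareOutput pulls the generated text back out of the provider response.
const text = BedrockLLMInputOutputAdapter.prepareOutput("anthropic", {
  completion: " Why did the chicken cross the road?",
});
```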
package/dist/util/bedrock.d.ts
DELETED
@@ -1,64 +0,0 @@
-import type { AwsCredentialIdentity, Provider } from "@aws-sdk/types";
-export type CredentialType = AwsCredentialIdentity | Provider<AwsCredentialIdentity>;
-/** Bedrock models.
-    To authenticate, the AWS client uses the following methods to automatically load credentials:
-    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
-    If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used.
-    Make sure the credentials / roles used have the required policies to access the Bedrock service.
-*/
-export interface BaseBedrockInput {
-    /** Model to use.
-        For example, "amazon.titan-tg1-large", this is equivalent to the modelId property in the list-foundation-models api.
-    */
-    model: string;
-    /** The AWS region e.g. `us-west-2`.
-        Fallback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here.
-    */
-    region?: string;
-    /** AWS Credentials.
-        If no credentials are provided, the default credentials from `@aws-sdk/credential-provider-node` will be used.
-    */
-    credentials?: CredentialType;
-    /** Temperature. */
-    temperature?: number;
-    /** Max tokens. */
-    maxTokens?: number;
-    /** A custom fetch function for low-level access to AWS API. Defaults to fetch(). */
-    fetchFn?: typeof fetch;
-    /** @deprecated Use endpointHost instead Override the default endpoint url. */
-    endpointUrl?: string;
-    /** Override the default endpoint hostname. */
-    endpointHost?: string;
-    /**
-     * Optional additional stop sequences to pass to the model. Currently only supported for Anthropic and AI21.
-     * @deprecated Use .bind({ "stop": [...] }) instead
-     * */
-    stopSequences?: string[];
-    /** Additional kwargs to pass to the model. */
-    modelKwargs?: Record<string, unknown>;
-    /** Whether or not to stream responses */
-    streaming: boolean;
-}
-type Dict = {
-    [key: string]: unknown;
-};
-/**
- * A helper class used within the `Bedrock` class. It is responsible for
- * preparing the input and output for the Bedrock service. It formats the
- * input prompt based on the provider (e.g., "anthropic", "ai21",
- * "amazon") and extracts the generated text from the service response.
- */
-export declare class BedrockLLMInputOutputAdapter {
-    /** Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides a helper function to extract
-    the generated text from the model response. */
-    static prepareInput(provider: string, prompt: string, maxTokens?: number, temperature?: number, stopSequences?: string[] | undefined, modelKwargs?: Record<string, unknown>, bedrockMethod?: "invoke" | "invoke-with-response-stream"): Dict;
-    /**
-     * Extracts the generated text from the service response.
-     * @param provider The provider name.
-     * @param responseBody The response body from the service.
-     * @returns The generated text.
-     */
-    static prepareOutput(provider: string, responseBody: any): string;
-}
-export {};
package/dist/util/bedrock.js
DELETED
@@ -1,71 +0,0 @@
-/**
- * A helper class used within the `Bedrock` class. It is responsible for
- * preparing the input and output for the Bedrock service. It formats the
- * input prompt based on the provider (e.g., "anthropic", "ai21",
- * "amazon") and extracts the generated text from the service response.
- */
-export class BedrockLLMInputOutputAdapter {
-    /** Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides a helper function to extract
-    the generated text from the model response. */
-    static prepareInput(provider, prompt, maxTokens = 50, temperature = 0, stopSequences = undefined, modelKwargs = {}, bedrockMethod = "invoke") {
-        const inputBody = {};
-        if (provider === "anthropic") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens_to_sample = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-        }
-        else if (provider === "ai21") {
-            inputBody.prompt = prompt;
-            inputBody.maxTokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stopSequences = stopSequences;
-        }
-        else if (provider === "meta") {
-            inputBody.prompt = prompt;
-            inputBody.max_gen_len = maxTokens;
-            inputBody.temperature = temperature;
-        }
-        else if (provider === "amazon") {
-            inputBody.inputText = prompt;
-            inputBody.textGenerationConfig = {
-                maxTokenCount: maxTokens,
-                temperature,
-            };
-        }
-        else if (provider === "cohere") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-            if (bedrockMethod === "invoke-with-response-stream") {
-                inputBody.stream = true;
-            }
-        }
-        return { ...inputBody, ...modelKwargs };
-    }
-    /**
-     * Extracts the generated text from the service response.
-     * @param provider The provider name.
-     * @param responseBody The response body from the service.
-     * @returns The generated text.
-     */
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    static prepareOutput(provider, responseBody) {
-        if (provider === "anthropic") {
-            return responseBody.completion;
-        }
-        else if (provider === "ai21") {
-            return responseBody?.completions?.[0]?.data?.text ?? "";
-        }
-        else if (provider === "cohere") {
-            return responseBody?.generations?.[0]?.text ?? responseBody?.text ?? "";
-        }
-        else if (provider === "meta") {
-            return responseBody.generation;
-        }
-        // I haven't been able to get a response with more than one result in it.
-        return responseBody.results?.[0]?.outputText;
-    }
-}