langchain 0.3.12 → 0.3.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/mrkl/index.cjs +1 -1
- package/dist/agents/mrkl/index.js +1 -1
- package/dist/chains/graph_qa/prompts.d.ts +1 -1
- package/dist/chains/sql_db/sql_db_chain.cjs +3 -3
- package/dist/chains/sql_db/sql_db_chain.d.ts +1 -1
- package/dist/chains/sql_db/sql_db_chain.js +3 -3
- package/dist/chains/sql_db/sql_db_prompt.d.ts +7 -7
- package/dist/chat_models/universal.cjs +1 -0
- package/dist/chat_models/universal.js +1 -0
- package/dist/evaluation/qa/prompt.d.ts +2 -2
- package/dist/hub/base.cjs +115 -0
- package/dist/{hub.d.ts → hub/base.d.ts} +6 -10
- package/dist/hub/base.js +109 -0
- package/dist/hub/index.cjs +53 -0
- package/dist/hub/index.d.ts +26 -0
- package/dist/hub/index.js +49 -0
- package/dist/hub/node.cjs +53 -0
- package/dist/hub/node.d.ts +18 -0
- package/dist/hub/node.js +50 -0
- package/dist/load/import_constants.cjs +1 -0
- package/dist/load/import_constants.js +1 -0
- package/dist/load/import_map.cjs +13 -1
- package/dist/load/import_map.d.ts +13 -1
- package/dist/load/import_map.js +13 -1
- package/dist/load/index.cjs +4 -2
- package/dist/load/index.d.ts +1 -1
- package/dist/load/index.js +4 -2
- package/hub/node.cjs +1 -0
- package/hub/node.d.cts +1 -0
- package/hub/node.d.ts +1 -0
- package/hub/node.js +1 -0
- package/hub.cjs +1 -1
- package/hub.d.cts +1 -1
- package/hub.d.ts +1 -1
- package/hub.js +1 -1
- package/package.json +15 -2
- package/dist/hub.cjs +0 -52
- package/dist/hub.js +0 -47
package/dist/agents/mrkl/index.cjs
CHANGED

```diff
@@ -92,7 +92,7 @@ class ZeroShotAgent extends agent_js_1.Agent {
         const toolStrings = tools
             .map((tool) => `${tool.name}: ${tool.description}`)
             .join("\n");
-        const toolNames = tools.map((tool) => tool.name);
+        const toolNames = tools.map((tool) => `"${tool.name}"`).join(", ");
         const formatInstructions = (0, prompts_1.renderTemplate)(prompt_js_1.FORMAT_INSTRUCTIONS, "f-string", {
             tool_names: toolNames,
         });
```
package/dist/agents/mrkl/index.js
CHANGED

```diff
@@ -89,7 +89,7 @@ export class ZeroShotAgent extends Agent {
         const toolStrings = tools
             .map((tool) => `${tool.name}: ${tool.description}`)
             .join("\n");
-        const toolNames = tools.map((tool) => tool.name);
+        const toolNames = tools.map((tool) => `"${tool.name}"`).join(", ");
         const formatInstructions = renderTemplate(FORMAT_INSTRUCTIONS, "f-string", {
             tool_names: toolNames,
         });
```
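The only change to the MRKL agent is how `{tool_names}` is rendered into the format instructions. A minimal sketch of the before/after output; the tool names here are invented:

```typescript
// Sketch of the rendering change; "search" and "calculator" are invented tool names.
const tools = [{ name: "search" }, { name: "calculator" }];

// 0.3.12: the raw array was interpolated into the prompt, stringifying as a
// bare comma-separated list with no quoting.
const before = tools.map((tool) => tool.name);
console.log(`${before}`); // search,calculator

// 0.3.13: names are quoted and joined explicitly, so the rendered
// format instructions list the tools unambiguously.
const after = tools.map((tool) => `"${tool.name}"`).join(", ");
console.log(after); // "search", "calculator"
```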
package/dist/chains/sql_db/sql_db_chain.cjs
CHANGED

```diff
@@ -180,10 +180,10 @@ class SqlDatabaseChain extends base_js_1.BaseChain {
         const maxToken = await (0, base_1.calculateMaxTokens)({
             prompt: stringWeSend,
             // Cast here to allow for other models that may not fit the union
-            modelName: llm.modelName,
+            modelName: llm.model,
         });
-        if (maxToken < llm.maxTokens) {
-            throw new Error(`The combination of the database structure and your question is too big for the model ${llm.modelName} which can compute only a max tokens of ${(0, base_1.getModelContextSize)(llm.modelName)}.
+        if (maxToken < (llm.maxTokens ?? -1)) {
+            throw new Error(`The combination of the database structure and your question is too big for the model ${llm.model} which can compute only a max tokens of ${(0, base_1.getModelContextSize)(llm.model)}.
       We suggest you to use the includeTables parameters when creating the SqlDatabase object to select only a subset of the tables. You can also use a model which can handle more tokens.`);
         }
     }
```
package/dist/chains/sql_db/sql_db_chain.d.ts
CHANGED

```diff
@@ -50,9 +50,9 @@ export declare class SqlDatabaseChain extends BaseChain {
     llm: BaseLanguageModelInterface;
     database: SqlDatabase;
     prompt: PromptTemplate<{
-        input: any;
         dialect: any;
         table_info: any;
+        input: any;
         top_k: any;
     }, any>;
     topK: number;
```
package/dist/chains/sql_db/sql_db_chain.js
CHANGED

```diff
@@ -177,10 +177,10 @@ export class SqlDatabaseChain extends BaseChain {
         const maxToken = await calculateMaxTokens({
             prompt: stringWeSend,
             // Cast here to allow for other models that may not fit the union
-            modelName: llm.modelName,
+            modelName: llm.model,
         });
-        if (maxToken < llm.maxTokens) {
-            throw new Error(`The combination of the database structure and your question is too big for the model ${llm.modelName} which can compute only a max tokens of ${getModelContextSize(llm.modelName)}.
+        if (maxToken < (llm.maxTokens ?? -1)) {
+            throw new Error(`The combination of the database structure and your question is too big for the model ${llm.model} which can compute only a max tokens of ${getModelContextSize(llm.model)}.
       We suggest you to use the includeTables parameters when creating the SqlDatabase object to select only a subset of the tables. You can also use a model which can handle more tokens.`);
         }
     }
```
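Two things change in the token check: the model identifier is read from `llm.model` instead of the deprecated `modelName`, and the comparison falls back to `-1` when `llm.maxTokens` is unset. A standalone sketch of the guard's semantics, not the chain's actual code:

```typescript
// `remaining` models calculateMaxTokens' result: context size minus prompt
// tokens, which can go negative for oversized prompts.
function checkTokenBudget(remaining: number, maxTokens?: number): void {
  // 0.3.12 compared `remaining < llm.maxTokens` directly, which fails
  // TypeScript's strict null checks when maxTokens is optional. The nullish
  // fallback keeps the comparison well-typed while preserving behavior:
  // with maxTokens unset, it only throws once the budget drops below -1.
  if (remaining < (maxTokens ?? -1)) {
    throw new Error("Prompt too large for the model's context window.");
  }
}

checkTokenBudget(500, 1024); // throws: budget below the requested completion size
checkTokenBudget(500);       // ok: no maxTokens configured
```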
package/dist/chains/sql_db/sql_db_prompt.d.ts
CHANGED

```diff
@@ -1,44 +1,44 @@
 import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts";
 export declare const DEFAULT_SQL_DATABASE_PROMPT: PromptTemplate<{
-    input: any;
     dialect: any;
     table_info: any;
+    input: any;
     top_k: any;
 }, any>;
 export declare const SQL_POSTGRES_PROMPT: PromptTemplate<{
-    input: any;
     dialect: any;
     table_info: any;
+    input: any;
     top_k: any;
 }, any>;
 export declare const SQL_SQLITE_PROMPT: PromptTemplate<{
-    input: any;
     dialect: any;
     table_info: any;
+    input: any;
     top_k: any;
 }, any>;
 export declare const SQL_MYSQL_PROMPT: PromptTemplate<{
-    input: any;
     dialect: any;
     table_info: any;
+    input: any;
     top_k: any;
 }, any>;
 export declare const SQL_MSSQL_PROMPT: PromptTemplate<{
-    input: any;
     dialect: any;
     table_info: any;
+    input: any;
     top_k: any;
 }, any>;
 export declare const SQL_SAP_HANA_PROMPT: PromptTemplate<{
-    input: any;
     dialect: any;
     table_info: any;
+    input: any;
     top_k: any;
 }, any>;
 export declare const SQL_ORACLE_PROMPT: PromptTemplate<{
-    input: any;
     dialect: any;
     table_info: any;
+    input: any;
     top_k: any;
 }, any>;
 export type SqlDialect = "oracle" | "postgres" | "sqlite" | "mysql" | "mssql" | "sap hana";
```
package/dist/chat_models/universal.cjs
CHANGED

```diff
@@ -27,6 +27,7 @@ params = {}) {
     if (!modelProviderCopy) {
         throw new Error(`Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`);
     }
+    // eslint-disable-next-line @typescript-eslint/no-unused-vars
     const { modelProvider: _unused, ...passedParams } = params;
     try {
         switch (modelProviderCopy) {
```
package/dist/chat_models/universal.js
CHANGED

```diff
@@ -24,6 +24,7 @@ params = {}) {
     if (!modelProviderCopy) {
         throw new Error(`Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`);
     }
+    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const { modelProvider: _unused, ...passedParams } = params;
     try {
         switch (modelProviderCopy) {
```
package/dist/evaluation/qa/prompt.d.ts
CHANGED

```diff
@@ -1,11 +1,11 @@
 import { PromptTemplate } from "@langchain/core/prompts";
 export declare const QA_PROMPT: PromptTemplate<{
-    answer: any;
     query: any;
+    answer: any;
     result: any;
 }, any>;
 export declare const SQL_PROMPT: PromptTemplate<{
-    answer: any;
     query: any;
+    answer: any;
     result: any;
 }, any>;
```
package/dist/hub/base.cjs
ADDED

```diff
@@ -0,0 +1,115 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.generateModelImportMap = exports.basePull = exports.basePush = void 0;
+const langsmith_1 = require("langsmith");
+/**
+ * Push a prompt to the hub.
+ * If the specified repo doesn't already exist, it will be created.
+ * @param repoFullName The full name of the repo.
+ * @param runnable The prompt to push.
+ * @param options
+ * @returns The URL of the newly pushed prompt in the hub.
+ */
+async function basePush(repoFullName, runnable, options) {
+    const client = new langsmith_1.Client(options);
+    const payloadOptions = {
+        object: runnable,
+        parentCommitHash: options?.parentCommitHash,
+        isPublic: options?.isPublic ?? options?.newRepoIsPublic,
+        description: options?.description ?? options?.newRepoDescription,
+        readme: options?.readme,
+        tags: options?.tags,
+    };
+    return client.pushPrompt(repoFullName, payloadOptions);
+}
+exports.basePush = basePush;
+async function basePull(ownerRepoCommit, options) {
+    const client = new langsmith_1.Client(options);
+    const promptObject = await client.pullPromptCommit(ownerRepoCommit, {
+        includeModel: options?.includeModel,
+    });
+    if (promptObject.manifest.kwargs?.metadata === undefined) {
+        promptObject.manifest.kwargs = {
+            ...promptObject.manifest.kwargs,
+            metadata: {},
+        };
+    }
+    promptObject.manifest.kwargs.metadata = {
+        ...promptObject.manifest.kwargs.metadata,
+        lc_hub_owner: promptObject.owner,
+        lc_hub_repo: promptObject.repo,
+        lc_hub_commit_hash: promptObject.commit_hash,
+    };
+    // Some nested mustache prompts have improperly parsed variables that include a dot.
+    if (promptObject.manifest.kwargs.template_format === "mustache") {
+        const stripDotNotation = (varName) => varName.split(".")[0];
+        const { input_variables } = promptObject.manifest.kwargs;
+        if (Array.isArray(input_variables)) {
+            promptObject.manifest.kwargs.input_variables =
+                input_variables.map(stripDotNotation);
+        }
+        const { messages } = promptObject.manifest.kwargs;
+        if (Array.isArray(messages)) {
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            promptObject.manifest.kwargs.messages = messages.map((message) => {
+                const nestedVars = message?.kwargs?.prompt?.kwargs?.input_variables;
+                if (Array.isArray(nestedVars)) {
+                    // eslint-disable-next-line no-param-reassign
+                    message.kwargs.prompt.kwargs.input_variables =
+                        nestedVars.map(stripDotNotation);
+                }
+                return message;
+            });
+        }
+    }
+    return promptObject;
+}
+exports.basePull = basePull;
+function generateModelImportMap(
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+modelClass) {
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const modelImportMap = {};
+    // TODO: Fix in 0.4.0. We can't get lc_id without instantiating the class, so we
+    // must put them inline here. In the future, make this less hacky
+    // This should probably use dynamic imports and have a web-only entrypoint
+    // in a future breaking release
+    if (modelClass !== undefined) {
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        const modelLcName = modelClass?.lc_name();
+        let importMapKey;
+        if (modelLcName === "ChatAnthropic") {
+            importMapKey = "chat_models__anthropic";
+        }
+        else if (modelLcName === "ChatAzureOpenAI") {
+            importMapKey = "chat_models__openai";
+        }
+        else if (modelLcName === "ChatGoogleVertexAI") {
+            importMapKey = "chat_models__vertexai";
+        }
+        else if (modelLcName === "ChatGoogleGenerativeAI") {
+            importMapKey = "chat_models__google_genai";
+        }
+        else if (modelLcName === "ChatBedrockConverse") {
+            importMapKey = "chat_models__chat_bedrock_converse";
+        }
+        else if (modelLcName === "ChatMistral") {
+            importMapKey = "chat_models__mistralai";
+        }
+        else if (modelLcName === "ChatFireworks") {
+            importMapKey = "chat_models__fireworks";
+        }
+        else if (modelLcName === "ChatGroq") {
+            importMapKey = "chat_models__groq";
+        }
+        else {
+            throw new Error("Received unsupport model class when pulling prompt.");
+        }
+        modelImportMap[importMapKey] = {
+            ...modelImportMap[importMapKey],
+            [modelLcName]: modelClass,
+        };
+    }
+    return modelImportMap;
+}
+exports.generateModelImportMap = generateModelImportMap;
```
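The mustache normalization in `basePull` is easiest to see in isolation. A minimal sketch; the variable names are invented:

```typescript
// Nested mustache templates such as "{{question.text}}" were parsed into
// input_variables entries containing a dot; only the root name is a real
// input variable, so everything after the first "." is dropped.
const stripDotNotation = (varName: string) => varName.split(".")[0];

console.log(["question.text", "context"].map(stripDotNotation));
// [ "question", "context" ]
```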
package/dist/{hub.d.ts → hub/base.d.ts}
RENAMED

```diff
@@ -1,4 +1,5 @@
-import { Runnable } from "@langchain/core/runnables";
+import type { BaseLanguageModel } from "@langchain/core/language_models/base";
+import type { Runnable } from "@langchain/core/runnables";
 /**
  * Push a prompt to the hub.
  * If the specified repo doesn't already exist, it will be created.
@@ -7,7 +8,7 @@ import { Runnable } from "@langchain/core/runnables";
  * @param options
  * @returns The URL of the newly pushed prompt in the hub.
  */
-export declare function push(repoFullName: string, runnable: Runnable, options?: {
+export declare function basePush(repoFullName: string, runnable: Runnable, options?: {
     apiKey?: string;
     apiUrl?: string;
     parentCommitHash?: string;
@@ -20,14 +21,9 @@ export declare function push(repoFullName: string, runnable: Runnable, options?:
     readme?: string;
     tags?: string[];
 }): Promise<string>;
-/**
- * Pull a prompt from the hub.
- * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
- * @param options
- * @returns
- */
-export declare function pull<T extends Runnable>(ownerRepoCommit: string, options?: {
+export declare function basePull(ownerRepoCommit: string, options?: {
     apiKey?: string;
     apiUrl?: string;
     includeModel?: boolean;
-}): Promise<T>;
+}): Promise<import("langsmith/schemas").PromptCommit>;
+export declare function generateModelImportMap(modelClass?: new (...args: any[]) => BaseLanguageModel): Record<string, any>;
```
package/dist/hub/base.js
ADDED

```diff
@@ -0,0 +1,109 @@
+import { Client } from "langsmith";
+/**
+ * Push a prompt to the hub.
+ * If the specified repo doesn't already exist, it will be created.
+ * @param repoFullName The full name of the repo.
+ * @param runnable The prompt to push.
+ * @param options
+ * @returns The URL of the newly pushed prompt in the hub.
+ */
+export async function basePush(repoFullName, runnable, options) {
+    const client = new Client(options);
+    const payloadOptions = {
+        object: runnable,
+        parentCommitHash: options?.parentCommitHash,
+        isPublic: options?.isPublic ?? options?.newRepoIsPublic,
+        description: options?.description ?? options?.newRepoDescription,
+        readme: options?.readme,
+        tags: options?.tags,
+    };
+    return client.pushPrompt(repoFullName, payloadOptions);
+}
+export async function basePull(ownerRepoCommit, options) {
+    const client = new Client(options);
+    const promptObject = await client.pullPromptCommit(ownerRepoCommit, {
+        includeModel: options?.includeModel,
+    });
+    if (promptObject.manifest.kwargs?.metadata === undefined) {
+        promptObject.manifest.kwargs = {
+            ...promptObject.manifest.kwargs,
+            metadata: {},
+        };
+    }
+    promptObject.manifest.kwargs.metadata = {
+        ...promptObject.manifest.kwargs.metadata,
+        lc_hub_owner: promptObject.owner,
+        lc_hub_repo: promptObject.repo,
+        lc_hub_commit_hash: promptObject.commit_hash,
+    };
+    // Some nested mustache prompts have improperly parsed variables that include a dot.
+    if (promptObject.manifest.kwargs.template_format === "mustache") {
+        const stripDotNotation = (varName) => varName.split(".")[0];
+        const { input_variables } = promptObject.manifest.kwargs;
+        if (Array.isArray(input_variables)) {
+            promptObject.manifest.kwargs.input_variables =
+                input_variables.map(stripDotNotation);
+        }
+        const { messages } = promptObject.manifest.kwargs;
+        if (Array.isArray(messages)) {
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            promptObject.manifest.kwargs.messages = messages.map((message) => {
+                const nestedVars = message?.kwargs?.prompt?.kwargs?.input_variables;
+                if (Array.isArray(nestedVars)) {
+                    // eslint-disable-next-line no-param-reassign
+                    message.kwargs.prompt.kwargs.input_variables =
+                        nestedVars.map(stripDotNotation);
+                }
+                return message;
+            });
+        }
+    }
+    return promptObject;
+}
+export function generateModelImportMap(
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+modelClass) {
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const modelImportMap = {};
+    // TODO: Fix in 0.4.0. We can't get lc_id without instantiating the class, so we
+    // must put them inline here. In the future, make this less hacky
+    // This should probably use dynamic imports and have a web-only entrypoint
+    // in a future breaking release
+    if (modelClass !== undefined) {
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        const modelLcName = modelClass?.lc_name();
+        let importMapKey;
+        if (modelLcName === "ChatAnthropic") {
+            importMapKey = "chat_models__anthropic";
+        }
+        else if (modelLcName === "ChatAzureOpenAI") {
+            importMapKey = "chat_models__openai";
+        }
+        else if (modelLcName === "ChatGoogleVertexAI") {
+            importMapKey = "chat_models__vertexai";
+        }
+        else if (modelLcName === "ChatGoogleGenerativeAI") {
+            importMapKey = "chat_models__google_genai";
+        }
+        else if (modelLcName === "ChatBedrockConverse") {
+            importMapKey = "chat_models__chat_bedrock_converse";
+        }
+        else if (modelLcName === "ChatMistral") {
+            importMapKey = "chat_models__mistralai";
+        }
+        else if (modelLcName === "ChatFireworks") {
+            importMapKey = "chat_models__fireworks";
+        }
+        else if (modelLcName === "ChatGroq") {
+            importMapKey = "chat_models__groq";
+        }
+        else {
+            throw new Error("Received unsupport model class when pulling prompt.");
+        }
+        modelImportMap[importMapKey] = {
+            ...modelImportMap[importMapKey],
+            [modelLcName]: modelClass,
+        };
+    }
+    return modelImportMap;
+}
```
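`generateModelImportMap` just builds the nested record that `load` uses to resolve the serialized model class. A shape sketch for one supported class, assuming `@langchain/anthropic` is installed:

```typescript
// The map is keyed by the serialized module path, then by the class's lc_name().
import { ChatAnthropic } from "@langchain/anthropic";

const modelImportMap = {
  chat_models__anthropic: { ChatAnthropic },
};
// generateModelImportMap(ChatAnthropic) returns an equivalent record, which
// load() merges over its default import map to revive the pulled manifest.
console.log(Object.keys(modelImportMap)); // [ "chat_models__anthropic" ]
```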
package/dist/hub/index.cjs
ADDED

```diff
@@ -0,0 +1,53 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.pull = exports.push = void 0;
+const index_js_1 = require("../load/index.cjs");
+const base_js_1 = require("./base.cjs");
+Object.defineProperty(exports, "push", { enumerable: true, get: function () { return base_js_1.basePush; } });
+/**
+ * Pull a prompt from the hub.
+ *
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options.apiKey LangSmith API key to use when pulling the prompt
+ * @param options.apiUrl LangSmith API URL to use when pulling the prompt
+ * @param options.includeModel Whether to also instantiate and attach a model instance to the prompt,
+ * if the prompt has associated model metadata. If set to true, invoking the resulting pulled prompt will
+ * also invoke the instantiated model. For non-OpenAI models, you must also set "modelClass" to the
+ * correct class of the model.
+ * @param options.modelClass If includeModel is true, the class of the model to instantiate. Required
+ * for non-OpenAI models. If you are running in Node or another environment that supports dynamic imports,
+ * you may instead import this function from "langchain/hub/node" and pass "includeModel: true" instead
+ * of specifying this parameter.
+ * @returns
+ */
+async function pull(ownerRepoCommit, options) {
+    const promptObject = await (0, base_js_1.basePull)(ownerRepoCommit, options);
+    try {
+        const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, undefined, (0, base_js_1.generateModelImportMap)(options?.modelClass));
+        return loadedPrompt;
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    }
+    catch (e) {
+        if (options?.includeModel) {
+            throw new Error([
+                e.message,
+                "",
+                `To load prompts with an associated non-OpenAI model, you must use the "langchain/hub/node" entrypoint, or pass a "modelClass" parameter like this:`,
+                "",
+                "```",
+                `import { pull } from "langchain/hub";`,
+                `import { ChatAnthropic } from "@langchain/anthropic";`,
+                "",
+                `const prompt = await pull("my-prompt", {`,
+                `  includeModel: true,`,
+                `  modelClass: ChatAnthropic,`,
+                `});`,
+                "```",
+            ].join("\n"));
+        }
+        else {
+            throw e;
+        }
+    }
+}
+exports.pull = pull;
```
package/dist/hub/index.d.ts
ADDED

```diff
@@ -0,0 +1,26 @@
+import { Runnable } from "@langchain/core/runnables";
+import type { BaseLanguageModel } from "@langchain/core/language_models/base";
+import { basePush } from "./base.js";
+export { basePush as push };
+/**
+ * Pull a prompt from the hub.
+ *
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options.apiKey LangSmith API key to use when pulling the prompt
+ * @param options.apiUrl LangSmith API URL to use when pulling the prompt
+ * @param options.includeModel Whether to also instantiate and attach a model instance to the prompt,
+ * if the prompt has associated model metadata. If set to true, invoking the resulting pulled prompt will
+ * also invoke the instantiated model. For non-OpenAI models, you must also set "modelClass" to the
+ * correct class of the model.
+ * @param options.modelClass If includeModel is true, the class of the model to instantiate. Required
+ * for non-OpenAI models. If you are running in Node or another environment that supports dynamic imports,
+ * you may instead import this function from "langchain/hub/node" and pass "includeModel: true" instead
+ * of specifying this parameter.
+ * @returns
+ */
+export declare function pull<T extends Runnable>(ownerRepoCommit: string, options?: {
+    apiKey?: string;
+    apiUrl?: string;
+    includeModel?: boolean;
+    modelClass?: new (...args: any[]) => BaseLanguageModel;
+}): Promise<T>;
```
package/dist/hub/index.js
ADDED

```diff
@@ -0,0 +1,49 @@
+import { load } from "../load/index.js";
+import { basePush, basePull, generateModelImportMap } from "./base.js";
+export { basePush as push };
+/**
+ * Pull a prompt from the hub.
+ *
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options.apiKey LangSmith API key to use when pulling the prompt
+ * @param options.apiUrl LangSmith API URL to use when pulling the prompt
+ * @param options.includeModel Whether to also instantiate and attach a model instance to the prompt,
+ * if the prompt has associated model metadata. If set to true, invoking the resulting pulled prompt will
+ * also invoke the instantiated model. For non-OpenAI models, you must also set "modelClass" to the
+ * correct class of the model.
+ * @param options.modelClass If includeModel is true, the class of the model to instantiate. Required
+ * for non-OpenAI models. If you are running in Node or another environment that supports dynamic imports,
+ * you may instead import this function from "langchain/hub/node" and pass "includeModel: true" instead
+ * of specifying this parameter.
+ * @returns
+ */
+export async function pull(ownerRepoCommit, options) {
+    const promptObject = await basePull(ownerRepoCommit, options);
+    try {
+        const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, undefined, generateModelImportMap(options?.modelClass));
+        return loadedPrompt;
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    }
+    catch (e) {
+        if (options?.includeModel) {
+            throw new Error([
+                e.message,
+                "",
+                `To load prompts with an associated non-OpenAI model, you must use the "langchain/hub/node" entrypoint, or pass a "modelClass" parameter like this:`,
+                "",
+                "```",
+                `import { pull } from "langchain/hub";`,
+                `import { ChatAnthropic } from "@langchain/anthropic";`,
+                "",
+                `const prompt = await pull("my-prompt", {`,
+                `  includeModel: true,`,
+                `  modelClass: ChatAnthropic,`,
+                `});`,
+                "```",
+            ].join("\n"));
+        }
+        else {
+            throw e;
+        }
+    }
+}
```
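The error text above doubles as the usage recipe for the default, web-safe entrypoint. Spelled out, assuming a LangSmith prompt named "my-prompt" with Anthropic model metadata and `@langchain/anthropic` installed:

```typescript
import { pull } from "langchain/hub";
import { ChatAnthropic } from "@langchain/anthropic";

// Passing the class explicitly avoids dynamic imports, so this also works in
// bundled or browser environments, not just Node.
const prompt = await pull("my-prompt", {
  includeModel: true,
  modelClass: ChatAnthropic,
});
```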
package/dist/hub/node.cjs
ADDED

```diff
@@ -0,0 +1,53 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.pull = exports.push = void 0;
+const base_js_1 = require("./base.cjs");
+Object.defineProperty(exports, "push", { enumerable: true, get: function () { return base_js_1.basePush; } });
+const index_js_1 = require("../load/index.cjs");
+/**
+ * Pull a prompt from the hub.
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options.apiKey LangSmith API key to use when pulling the prompt
+ * @param options.apiUrl LangSmith API URL to use when pulling the prompt
+ * @param options.includeModel Whether to also instantiate and attach a model instance to the prompt,
+ * if the prompt has associated model metadata. If set to true, invoking the resulting pulled prompt will
+ * also invoke the instantiated model. You must have the appropriate LangChain integration package installed.
+ * @returns
+ */
+async function pull(ownerRepoCommit, options) {
+    const promptObject = await (0, base_js_1.basePull)(ownerRepoCommit, options);
+    let modelClass;
+    if (options?.includeModel) {
+        if (Array.isArray(promptObject.manifest.kwargs?.last?.kwargs?.bound?.id)) {
+            const modelName = promptObject.manifest.kwargs?.last?.kwargs?.bound?.id.at(-1);
+            if (modelName === "ChatAnthropic") {
+                modelClass = (await import("@langchain/anthropic")).ChatAnthropic;
+            }
+            else if (modelName === "ChatAzureOpenAI") {
+                modelClass = (await import("@langchain/openai")).AzureChatOpenAI;
+            }
+            else if (modelName === "ChatGoogleVertexAI") {
+                modelClass = (await import("@langchain/google-vertexai")).ChatVertexAI;
+            }
+            else if (modelName === "ChatGoogleGenerativeAI") {
+                modelClass = (await import("@langchain/google-genai"))
+                    .ChatGoogleGenerativeAI;
+            }
+            else if (modelName === "ChatBedrockConverse") {
+                modelClass = (await import("@langchain/aws")).ChatBedrockConverse;
+            }
+            else if (modelName === "ChatMistral") {
+                modelClass = (await import("@langchain/mistralai")).ChatMistralAI;
+            }
+            else if (modelName === "ChatGroq") {
+                modelClass = (await import("@langchain/groq")).ChatGroq;
+            }
+            else if (modelName !== undefined) {
+                console.warn(`Received unknown model name from prompt hub: "${modelName}"`);
+            }
+        }
+    }
+    const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, undefined, (0, base_js_1.generateModelImportMap)(modelClass));
+    return loadedPrompt;
+}
+exports.pull = pull;
```
package/dist/hub/node.d.ts
ADDED

```diff
@@ -0,0 +1,18 @@
+import { Runnable } from "@langchain/core/runnables";
+import { basePush } from "./base.js";
+export { basePush as push };
+/**
+ * Pull a prompt from the hub.
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options.apiKey LangSmith API key to use when pulling the prompt
+ * @param options.apiUrl LangSmith API URL to use when pulling the prompt
+ * @param options.includeModel Whether to also instantiate and attach a model instance to the prompt,
+ * if the prompt has associated model metadata. If set to true, invoking the resulting pulled prompt will
+ * also invoke the instantiated model. You must have the appropriate LangChain integration package installed.
+ * @returns
+ */
+export declare function pull<T extends Runnable>(ownerRepoCommit: string, options?: {
+    apiKey?: string;
+    apiUrl?: string;
+    includeModel?: boolean;
+}): Promise<T>;
```
package/dist/hub/node.js
ADDED

```diff
@@ -0,0 +1,50 @@
+import { basePush, basePull, generateModelImportMap } from "./base.js";
+import { load } from "../load/index.js";
+// TODO: Make this the default, add web entrypoint in next breaking release
+export { basePush as push };
+/**
+ * Pull a prompt from the hub.
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options.apiKey LangSmith API key to use when pulling the prompt
+ * @param options.apiUrl LangSmith API URL to use when pulling the prompt
+ * @param options.includeModel Whether to also instantiate and attach a model instance to the prompt,
+ * if the prompt has associated model metadata. If set to true, invoking the resulting pulled prompt will
+ * also invoke the instantiated model. You must have the appropriate LangChain integration package installed.
+ * @returns
+ */
+export async function pull(ownerRepoCommit, options) {
+    const promptObject = await basePull(ownerRepoCommit, options);
+    let modelClass;
+    if (options?.includeModel) {
+        if (Array.isArray(promptObject.manifest.kwargs?.last?.kwargs?.bound?.id)) {
+            const modelName = promptObject.manifest.kwargs?.last?.kwargs?.bound?.id.at(-1);
+            if (modelName === "ChatAnthropic") {
+                modelClass = (await import("@langchain/anthropic")).ChatAnthropic;
+            }
+            else if (modelName === "ChatAzureOpenAI") {
+                modelClass = (await import("@langchain/openai")).AzureChatOpenAI;
+            }
+            else if (modelName === "ChatGoogleVertexAI") {
+                modelClass = (await import("@langchain/google-vertexai")).ChatVertexAI;
+            }
+            else if (modelName === "ChatGoogleGenerativeAI") {
+                modelClass = (await import("@langchain/google-genai"))
+                    .ChatGoogleGenerativeAI;
+            }
+            else if (modelName === "ChatBedrockConverse") {
+                modelClass = (await import("@langchain/aws")).ChatBedrockConverse;
+            }
+            else if (modelName === "ChatMistral") {
+                modelClass = (await import("@langchain/mistralai")).ChatMistralAI;
+            }
+            else if (modelName === "ChatGroq") {
+                modelClass = (await import("@langchain/groq")).ChatGroq;
+            }
+            else if (modelName !== undefined) {
+                console.warn(`Received unknown model name from prompt hub: "${modelName}"`);
+            }
+        }
+    }
+    const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, undefined, generateModelImportMap(modelClass));
+    return loadedPrompt;
+}
```
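In Node, or any runtime with dynamic `import()`, the new entrypoint removes the need for `modelClass`: the model is resolved from the prompt's stored metadata, provided the matching integration package is installed. Assuming the same hypothetical "my-prompt":

```typescript
import { pull } from "langchain/hub/node";

// The matching integration package (here, hypothetically, @langchain/anthropic)
// must be installed; pull() dynamically imports it based on the model name
// stored in the prompt manifest.
const prompt = await pull("my-prompt", { includeModel: true });
```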
package/dist/load/import_map.cjs
CHANGED

```diff
@@ -25,7 +25,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.stores__doc__base = exports.retrievers__matryoshka_retriever = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__ensemble = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.output_parsers = exports.callbacks = exports.document_transformers__openai_functions = exports.document_loaders__base = exports.memory__chat_memory = exports.memory = exports.text_splitter = exports.vectorstores__memory = exports.embeddings__fake = exports.embeddings__cache_backed = exports.chains__retrieval = exports.chains__openai_functions = exports.chains__history_aware_retriever = exports.chains__combine_documents__reduce = exports.chains__combine_documents = exports.chains = exports.tools__retriever = exports.tools__render = exports.tools__chain = exports.tools = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad__openai_tools = exports.agents__format_scratchpad = exports.agents__toolkits = exports.agents = void 0;
-exports.schema__output = exports.schema__output_parser = exports.schema__runnable = exports.prompts__base = exports.prompts__pipeline = exports.prompts__image = exports.prompts__chat = exports.schema = exports.schema__messages = exports.prompts__prompt = exports.embeddings__openai = exports.llms__openai = exports.chat_models__openai = exports.schema__prompt_template = exports.schema__query_constructor = exports.indexes = exports.runnables__remote = exports.smith = exports.evaluation = exports.experimental__prompts__custom_format = exports.experimental__masking = exports.experimental__chains__violation_of_expectations = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_files = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = void 0;
+exports.schema__output = exports.schema__output_parser = exports.schema__runnable = exports.prompts__base = exports.prompts__pipeline = exports.prompts__image = exports.prompts__chat = exports.schema = exports.schema__messages = exports.prompts__prompt = exports.embeddings__azure_openai = exports.embeddings__openai = exports.llms__azure_openai = exports.llms__openai = exports.chat_models__azure_openai = exports.chat_models__openai = exports.schema__prompt_template = exports.schema__query_constructor = exports.indexes = exports.runnables__remote = exports.smith = exports.evaluation = exports.experimental__prompts__custom_format = exports.experimental__masking = exports.experimental__chains__violation_of_expectations = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_files = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = void 0;
 exports.agents = __importStar(require("../agents/index.cjs"));
 exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
 exports.agents__format_scratchpad = __importStar(require("../agents/format_scratchpad/openai_functions.cjs"));
@@ -103,14 +103,26 @@ const chat_models__openai = {
     ChatOpenAI: openai_1.ChatOpenAI
 };
 exports.chat_models__openai = chat_models__openai;
+const chat_models__azure_openai = {
+    AzureChatOpenAI: openai_1.AzureChatOpenAI
+};
+exports.chat_models__azure_openai = chat_models__azure_openai;
 const llms__openai = {
     OpenAI: openai_1.OpenAI
 };
 exports.llms__openai = llms__openai;
+const llms__azure_openai = {
+    AzureOpenAI: openai_1.AzureOpenAI
+};
+exports.llms__azure_openai = llms__azure_openai;
 const embeddings__openai = {
     OpenAIEmbeddings: openai_1.OpenAIEmbeddings
 };
 exports.embeddings__openai = embeddings__openai;
+const embeddings__azure_openai = {
+    AzureOpenAIEmbeddings: openai_1.AzureOpenAIEmbeddings
+};
+exports.embeddings__azure_openai = embeddings__azure_openai;
 const prompts__prompt = {
     PromptTemplate: prompts_1.PromptTemplate
 };
```
package/dist/load/import_map.d.ts
CHANGED

```diff
@@ -64,7 +64,7 @@ export * as runnables__remote from "../runnables/remote.js";
 export * as indexes from "../indexes/index.js";
 export * as schema__query_constructor from "../schema/query_constructor.js";
 export * as schema__prompt_template from "../schema/prompt_template.js";
-import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from "@langchain/openai";
+import { ChatOpenAI, AzureChatOpenAI, OpenAI, AzureOpenAI, OpenAIEmbeddings, AzureOpenAIEmbeddings } from "@langchain/openai";
 import { PromptTemplate, AIMessagePromptTemplate, ChatMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, ImagePromptTemplate, PipelinePromptTemplate } from "@langchain/core/prompts";
 import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk } from "@langchain/core/messages";
 import { StringPromptValue } from "@langchain/core/prompt_values";
@@ -75,14 +75,26 @@ declare const chat_models__openai: {
     ChatOpenAI: typeof ChatOpenAI;
 };
 export { chat_models__openai };
+declare const chat_models__azure_openai: {
+    AzureChatOpenAI: typeof AzureChatOpenAI;
+};
+export { chat_models__azure_openai };
 declare const llms__openai: {
     OpenAI: typeof OpenAI;
 };
 export { llms__openai };
+declare const llms__azure_openai: {
+    AzureOpenAI: typeof AzureOpenAI;
+};
+export { llms__azure_openai };
 declare const embeddings__openai: {
     OpenAIEmbeddings: typeof OpenAIEmbeddings;
 };
 export { embeddings__openai };
+declare const embeddings__azure_openai: {
+    AzureOpenAIEmbeddings: typeof AzureOpenAIEmbeddings;
+};
+export { embeddings__azure_openai };
 declare const prompts__prompt: {
     PromptTemplate: typeof PromptTemplate;
 };
```
package/dist/load/import_map.js
CHANGED

```diff
@@ -65,7 +65,7 @@ export * as runnables__remote from "../runnables/remote.js";
 export * as indexes from "../indexes/index.js";
 export * as schema__query_constructor from "../schema/query_constructor.js";
 export * as schema__prompt_template from "../schema/prompt_template.js";
-import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from "@langchain/openai";
+import { ChatOpenAI, AzureChatOpenAI, OpenAI, AzureOpenAI, OpenAIEmbeddings, AzureOpenAIEmbeddings } from "@langchain/openai";
 import { PromptTemplate, AIMessagePromptTemplate, ChatMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, ImagePromptTemplate, PipelinePromptTemplate } from "@langchain/core/prompts";
 import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk } from "@langchain/core/messages";
 import { StringPromptValue } from "@langchain/core/prompt_values";
@@ -76,14 +76,26 @@ const chat_models__openai = {
     ChatOpenAI
 };
 export { chat_models__openai };
+const chat_models__azure_openai = {
+    AzureChatOpenAI
+};
+export { chat_models__azure_openai };
 const llms__openai = {
     OpenAI
 };
 export { llms__openai };
+const llms__azure_openai = {
+    AzureOpenAI
+};
+export { llms__azure_openai };
 const embeddings__openai = {
     OpenAIEmbeddings
 };
 export { embeddings__openai };
+const embeddings__azure_openai = {
+    AzureOpenAIEmbeddings
+};
+export { embeddings__azure_openai };
 const prompts__prompt = {
     PromptTemplate
 };
```
package/dist/load/index.cjs
CHANGED

```diff
@@ -40,12 +40,14 @@ async function load(text,
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 secretsMap = {},
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-optionalImportsMap = {}) {
+optionalImportsMap = {},
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+additionalImportsMap = {}) {
     return (0, load_1.load)(text, {
         secretsMap,
         optionalImportsMap,
         optionalImportEntrypoints: import_constants_js_1.optionalImportEntrypoints,
-        importMap,
+        importMap: { ...importMap, ...additionalImportsMap },
     });
 }
 exports.load = load;
```
package/dist/load/index.d.ts
CHANGED

```diff
@@ -8,4 +8,4 @@ import { OptionalImportMap } from "./import_type.js";
  * @param optionalImportsMap
  * @returns A loaded instance of a LangChain module.
  */
-export declare function load<T>(text: string, secretsMap?: Record<string, any>, optionalImportsMap?: OptionalImportMap & Record<string, any>): Promise<T>;
+export declare function load<T>(text: string, secretsMap?: Record<string, any>, optionalImportsMap?: OptionalImportMap & Record<string, any>, additionalImportsMap?: Record<string, any>): Promise<T>;
```
package/dist/load/index.js
CHANGED

```diff
@@ -14,11 +14,13 @@ export async function load(text,
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 secretsMap = {},
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-optionalImportsMap = {}) {
+optionalImportsMap = {},
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+additionalImportsMap = {}) {
     return coreLoad(text, {
         secretsMap,
         optionalImportsMap,
         optionalImportEntrypoints,
-        importMap,
+        importMap: { ...importMap, ...additionalImportsMap },
     });
 }
```
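The new optional fourth parameter is what the hub entrypoints use to thread the resolved model class into deserialization; its entries are merged over the default import map. A hedged sketch, where the serialized JSON and the map key follow the conventions visible above:

```typescript
import { load } from "langchain/load";
import { ChatAnthropic } from "@langchain/anthropic";

declare const serialized: string; // a serialized manifest, e.g. from basePull

// Merging an extra module over the default import map lets a manifest that
// references ChatAnthropic be revived even though the default map only
// bundles the OpenAI classes.
const revived = await load(serialized, undefined, undefined, {
  chat_models__anthropic: { ChatAnthropic },
});
```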
package/hub/node.cjs
ADDED

```diff
@@ -0,0 +1 @@
+module.exports = require('../dist/hub/node.cjs');
```

package/hub/node.d.cts
ADDED

```diff
@@ -0,0 +1 @@
+export * from '../dist/hub/node.js'
```

package/hub/node.d.ts
ADDED

```diff
@@ -0,0 +1 @@
+export * from '../dist/hub/node.js'
```

package/hub/node.js
ADDED

```diff
@@ -0,0 +1 @@
+export * from '../dist/hub/node.js'
```

package/hub.cjs
CHANGED

```diff
@@ -1 +1 @@
-module.exports = require('./dist/hub.cjs');
+module.exports = require('./dist/hub/index.cjs');
```

package/hub.d.cts
CHANGED

```diff
@@ -1 +1 @@
-export * from './dist/hub.js'
+export * from './dist/hub/index.js'
```

package/hub.d.ts
CHANGED

```diff
@@ -1 +1 @@
-export * from './dist/hub.js'
+export * from './dist/hub/index.js'
```

package/hub.js
CHANGED

```diff
@@ -1 +1 @@
-export * from './dist/hub.js'
+export * from './dist/hub/index.js'
```
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "langchain",
-  "version": "0.3.12",
+  "version": "0.3.13",
   "description": "Typescript bindings for langchain",
   "type": "module",
   "engines": {
@@ -306,6 +306,10 @@
     "hub.js",
     "hub.d.ts",
     "hub.d.cts",
+    "hub/node.cjs",
+    "hub/node.js",
+    "hub/node.d.ts",
+    "hub/node.d.cts",
     "util/document.cjs",
     "util/document.js",
     "util/document.d.ts",
@@ -525,7 +529,7 @@
     }
   },
   "dependencies": {
-    "@langchain/openai": ">=0.1.0 <0.4.0",
+    "@langchain/openai": ">=0.1.0 <0.5.0",
     "@langchain/textsplitters": ">=0.0.0 <0.2.0",
     "js-tiktoken": "^1.0.12",
     "js-yaml": "^4.1.0",
@@ -1222,6 +1226,15 @@
       "import": "./hub.js",
       "require": "./hub.cjs"
     },
+    "./hub/node": {
+      "types": {
+        "import": "./hub/node.d.ts",
+        "require": "./hub/node.d.cts",
+        "default": "./hub/node.d.ts"
+      },
+      "import": "./hub/node.js",
+      "require": "./hub/node.cjs"
+    },
     "./util/document": {
       "types": {
         "import": "./util/document.d.ts",
```
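With the `./hub/node` exports entry and the wrapper files above in place, the subpath resolves under both module systems:

```typescript
// ESM consumers resolve the "import" condition:
import { pull } from "langchain/hub/node";

// CommonJS consumers resolve the "require" condition to the .cjs wrapper:
// const { pull } = require("langchain/hub/node");
```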
package/dist/hub.cjs
DELETED

```diff
@@ -1,52 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.pull = exports.push = void 0;
-const langsmith_1 = require("langsmith");
-const index_js_1 = require("./load/index.cjs");
-/**
- * Push a prompt to the hub.
- * If the specified repo doesn't already exist, it will be created.
- * @param repoFullName The full name of the repo.
- * @param runnable The prompt to push.
- * @param options
- * @returns The URL of the newly pushed prompt in the hub.
- */
-async function push(repoFullName, runnable, options) {
-    const client = new langsmith_1.Client(options);
-    const payloadOptions = {
-        object: runnable,
-        parentCommitHash: options?.parentCommitHash,
-        isPublic: options?.isPublic ?? options?.newRepoIsPublic,
-        description: options?.description ?? options?.newRepoDescription,
-        readme: options?.readme,
-        tags: options?.tags,
-    };
-    return client.pushPrompt(repoFullName, payloadOptions);
-}
-exports.push = push;
-/**
- * Pull a prompt from the hub.
- * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
- * @param options
- * @returns
- */
-async function pull(ownerRepoCommit, options) {
-    const client = new langsmith_1.Client(options);
-    const promptObject = await client.pullPromptCommit(ownerRepoCommit, {
-        includeModel: options?.includeModel,
-    });
-    if (promptObject.manifest.kwargs?.metadata === undefined) {
-        promptObject.manifest.kwargs = {
-            ...promptObject.manifest.kwargs,
-            metadata: {},
-        };
-    }
-    promptObject.manifest.kwargs.metadata = {
-        ...promptObject.manifest.kwargs.metadata,
-        lc_hub_owner: promptObject.owner,
-        lc_hub_repo: promptObject.repo,
-        lc_hub_commit_hash: promptObject.commit_hash,
-    };
-    return (0, index_js_1.load)(JSON.stringify(promptObject.manifest));
-}
-exports.pull = pull;
```
package/dist/hub.js
DELETED

```diff
@@ -1,47 +0,0 @@
-import { Client } from "langsmith";
-import { load } from "./load/index.js";
-/**
- * Push a prompt to the hub.
- * If the specified repo doesn't already exist, it will be created.
- * @param repoFullName The full name of the repo.
- * @param runnable The prompt to push.
- * @param options
- * @returns The URL of the newly pushed prompt in the hub.
- */
-export async function push(repoFullName, runnable, options) {
-    const client = new Client(options);
-    const payloadOptions = {
-        object: runnable,
-        parentCommitHash: options?.parentCommitHash,
-        isPublic: options?.isPublic ?? options?.newRepoIsPublic,
-        description: options?.description ?? options?.newRepoDescription,
-        readme: options?.readme,
-        tags: options?.tags,
-    };
-    return client.pushPrompt(repoFullName, payloadOptions);
-}
-/**
- * Pull a prompt from the hub.
- * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
- * @param options
- * @returns
- */
-export async function pull(ownerRepoCommit, options) {
-    const client = new Client(options);
-    const promptObject = await client.pullPromptCommit(ownerRepoCommit, {
-        includeModel: options?.includeModel,
-    });
-    if (promptObject.manifest.kwargs?.metadata === undefined) {
-        promptObject.manifest.kwargs = {
-            ...promptObject.manifest.kwargs,
-            metadata: {},
-        };
-    }
-    promptObject.manifest.kwargs.metadata = {
-        ...promptObject.manifest.kwargs.metadata,
-        lc_hub_owner: promptObject.owner,
-        lc_hub_repo: promptObject.repo,
-        lc_hub_commit_hash: promptObject.commit_hash,
-    };
-    return load(JSON.stringify(promptObject.manifest));
-}
```