langchain 0.0.197 → 0.0.199
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/agents/index.d.ts +1 -1
- package/dist/chains/conversational_retrieval_chain.cjs +16 -2
- package/dist/chains/conversational_retrieval_chain.d.ts +2 -0
- package/dist/chains/conversational_retrieval_chain.js +16 -2
- package/dist/chat_models/llama_cpp.cjs +45 -0
- package/dist/chat_models/llama_cpp.d.ts +4 -1
- package/dist/chat_models/llama_cpp.js +45 -0
- package/dist/document_loaders/fs/chatgpt.cjs +85 -0
- package/dist/document_loaders/fs/chatgpt.d.ts +8 -0
- package/dist/document_loaders/fs/chatgpt.js +81 -0
- package/dist/document_loaders/fs/pptx.cjs +39 -0
- package/dist/document_loaders/fs/pptx.d.ts +23 -0
- package/dist/document_loaders/fs/pptx.js +35 -0
- package/dist/document_loaders/web/confluence.cjs +31 -7
- package/dist/document_loaders/web/confluence.d.ts +12 -5
- package/dist/document_loaders/web/confluence.js +31 -7
- package/dist/experimental/openai_assistant/index.cjs +32 -0
- package/dist/experimental/openai_assistant/index.d.ts +26 -0
- package/dist/experimental/openai_assistant/index.js +32 -0
- package/dist/experimental/tools/pyinterpreter.cjs +248 -0
- package/dist/experimental/tools/pyinterpreter.d.ts +18 -0
- package/dist/experimental/tools/pyinterpreter.js +244 -0
- package/dist/graphs/neo4j_graph.cjs +49 -14
- package/dist/graphs/neo4j_graph.d.ts +30 -0
- package/dist/graphs/neo4j_graph.js +49 -14
- package/dist/llms/gradient_ai.cjs +98 -0
- package/dist/llms/gradient_ai.d.ts +50 -0
- package/dist/llms/gradient_ai.js +94 -0
- package/dist/llms/hf.cjs +13 -2
- package/dist/llms/hf.d.ts +5 -0
- package/dist/llms/hf.js +13 -2
- package/dist/llms/llama_cpp.cjs +17 -3
- package/dist/llms/llama_cpp.d.ts +4 -1
- package/dist/llms/llama_cpp.js +17 -3
- package/dist/llms/watsonx_ai.cjs +154 -0
- package/dist/llms/watsonx_ai.d.ts +72 -0
- package/dist/llms/watsonx_ai.js +150 -0
- package/dist/load/import_constants.cjs +6 -0
- package/dist/load/import_constants.js +6 -0
- package/dist/load/import_map.cjs +4 -3
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/output_parsers/json.cjs +4 -0
- package/dist/output_parsers/json.js +4 -0
- package/dist/tools/google_places.cjs +81 -0
- package/dist/tools/google_places.d.ts +21 -0
- package/dist/tools/google_places.js +77 -0
- package/dist/vectorstores/clickhouse.cjs +286 -0
- package/dist/vectorstores/clickhouse.d.ts +126 -0
- package/dist/vectorstores/clickhouse.js +259 -0
- package/dist/vectorstores/elasticsearch.cjs +16 -3
- package/dist/vectorstores/elasticsearch.d.ts +6 -2
- package/dist/vectorstores/elasticsearch.js +16 -3
- package/dist/vectorstores/pgvector.cjs +142 -18
- package/dist/vectorstores/pgvector.d.ts +21 -0
- package/dist/vectorstores/pgvector.js +142 -18
- package/dist/vectorstores/prisma.cjs +1 -1
- package/dist/vectorstores/prisma.js +1 -1
- package/dist/vectorstores/weaviate.cjs +45 -2
- package/dist/vectorstores/weaviate.d.ts +27 -1
- package/dist/vectorstores/weaviate.js +45 -2
- package/dist/vectorstores/xata.cjs +3 -2
- package/dist/vectorstores/xata.js +3 -2
- package/document_loaders/fs/chatgpt.cjs +1 -0
- package/document_loaders/fs/chatgpt.d.ts +1 -0
- package/document_loaders/fs/chatgpt.js +1 -0
- package/document_loaders/fs/pptx.cjs +1 -0
- package/document_loaders/fs/pptx.d.ts +1 -0
- package/document_loaders/fs/pptx.js +1 -0
- package/experimental/tools/pyinterpreter.cjs +1 -0
- package/experimental/tools/pyinterpreter.d.ts +1 -0
- package/experimental/tools/pyinterpreter.js +1 -0
- package/llms/gradient_ai.cjs +1 -0
- package/llms/gradient_ai.d.ts +1 -0
- package/llms/gradient_ai.js +1 -0
- package/llms/watsonx_ai.cjs +1 -0
- package/llms/watsonx_ai.d.ts +1 -0
- package/llms/watsonx_ai.js +1 -0
- package/package.json +87 -13
- package/tools/google_places.cjs +1 -0
- package/tools/google_places.d.ts +1 -0
- package/tools/google_places.js +1 -0
- package/vectorstores/clickhouse.cjs +1 -0
- package/vectorstores/clickhouse.d.ts +1 -0
- package/vectorstores/clickhouse.js +1 -0
package/dist/graphs/neo4j_graph.cjs
CHANGED
@@ -59,6 +59,16 @@ class Neo4jGraph {
             writable: true,
             value: ""
         });
+        Object.defineProperty(this, "structuredSchema", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: {
+                nodeProps: {},
+                relProps: {},
+                relationships: [],
+            }
+        });
         try {
             this.driver = neo4j_driver_1.default.driver(url, neo4j_driver_1.default.auth.basic(username, password));
             this.database = database;
@@ -96,6 +106,9 @@ class Neo4jGraph {
     getSchema() {
         return this.schema;
     }
+    getStructuredSchema() {
+        return this.structuredSchema;
+    }
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     async query(query, params = {}) {
         try {
@@ -138,21 +151,43 @@ class Neo4jGraph {
       YIELD label, other, elementType, type, property
       WHERE type = "RELATIONSHIP" AND elementType = "node"
       UNWIND other AS other_node
-      RETURN
-      `;
-        const nodeProperties = await this.query(nodePropertiesQuery);
-        const relationshipsProperties = await this.query(relPropertiesQuery);
-        const relationships = await this.query(relQuery);
-        this.schema = `
-    Node properties are the following:
-    ${JSON.stringify(nodeProperties?.map((el) => el.output))}
-
-    Relationship properties are the following:
-    ${JSON.stringify(relationshipsProperties?.map((el) => el.output))}
-
-    The relationships are the following:
-    ${JSON.stringify(relationships?.map((el) => el.output))}
+      RETURN {start: label, type: property, end: toString(other_node)} AS output
       `;
+        // Assuming query method is defined and returns a Promise
+        const nodeProperties = (await this.query(nodePropertiesQuery))?.map((el) => el.output);
+        const relationshipsProperties = (await this.query(relPropertiesQuery))?.map((el) => el.output);
+        const relationships = (await this.query(relQuery))?.map((el) => el.output);
+        // Structured schema similar to Python's dictionary comprehension
+        this.structuredSchema = {
+            nodeProps: Object.fromEntries(nodeProperties?.map((el) => [el.labels, el.properties]) || []),
+            relProps: Object.fromEntries(relationshipsProperties?.map((el) => [el.type, el.properties]) || []),
+            relationships: relationships || [],
+        };
+        // Format node properties
+        const formattedNodeProps = nodeProperties?.map((el) => {
+            const propsStr = el.properties
+                .map((prop) => `${prop.property}: ${prop.type}`)
+                .join(", ");
+            return `${el.labels} {${propsStr}}`;
+        });
+        // Format relationship properties
+        const formattedRelProps = relationshipsProperties?.map((el) => {
+            const propsStr = el.properties
+                .map((prop) => `${prop.property}: ${prop.type}`)
+                .join(", ");
+            return `${el.type} {${propsStr}}`;
+        });
+        // Format relationships
+        const formattedRels = relationships?.map((el) => `(:${el.start})-[:${el.type}]->(:${el.end})`);
+        // Combine all formatted elements into a single string
+        this.schema = [
+            "Node properties are the following:",
+            formattedNodeProps?.join(", "),
+            "Relationship properties are the following:",
+            formattedRelProps?.join(", "),
+            "The relationships are the following:",
+            formattedRels?.join(", "),
+        ].join("\n");
     }
     async close() {
         await this.driver.close();
package/dist/graphs/neo4j_graph.d.ts
CHANGED
@@ -4,6 +4,34 @@ interface Neo4jGraphConfig {
     password: string;
     database?: string;
 }
+interface StructuredSchema {
+    nodeProps: {
+        [key: NodeType["labels"]]: NodeType["properties"];
+    };
+    relProps: {
+        [key: RelType["type"]]: RelType["properties"];
+    };
+    relationships: PathType[];
+}
+type NodeType = {
+    labels: string;
+    properties: {
+        property: string;
+        type: string;
+    }[];
+};
+type RelType = {
+    type: string;
+    properties: {
+        property: string;
+        type: string;
+    }[];
+};
+type PathType = {
+    start: string;
+    type: string;
+    end: string;
+};
 /**
  * @security *Security note*: Make sure that the database connection uses credentials
  * that are narrowly-scoped to only include necessary permissions.
@@ -22,9 +50,11 @@ export declare class Neo4jGraph {
     private driver;
     private database;
     private schema;
+    private structuredSchema;
     constructor({ url, username, password, database, }: Neo4jGraphConfig);
     static initialize(config: Neo4jGraphConfig): Promise<Neo4jGraph>;
     getSchema(): string;
+    getStructuredSchema(): StructuredSchema;
     query(query: string, params?: any): Promise<any[] | undefined>;
     verifyConnectivity(): Promise<void>;
     refreshSchema(): Promise<void>;
package/dist/graphs/neo4j_graph.js
CHANGED
@@ -33,6 +33,16 @@ export class Neo4jGraph {
             writable: true,
             value: ""
         });
+        Object.defineProperty(this, "structuredSchema", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: {
+                nodeProps: {},
+                relProps: {},
+                relationships: [],
+            }
+        });
         try {
             this.driver = neo4j.driver(url, neo4j.auth.basic(username, password));
             this.database = database;
@@ -70,6 +80,9 @@ export class Neo4jGraph {
     getSchema() {
         return this.schema;
     }
+    getStructuredSchema() {
+        return this.structuredSchema;
+    }
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     async query(query, params = {}) {
         try {
@@ -112,21 +125,43 @@ export class Neo4jGraph {
       YIELD label, other, elementType, type, property
       WHERE type = "RELATIONSHIP" AND elementType = "node"
       UNWIND other AS other_node
-      RETURN
-      `;
-        const nodeProperties = await this.query(nodePropertiesQuery);
-        const relationshipsProperties = await this.query(relPropertiesQuery);
-        const relationships = await this.query(relQuery);
-        this.schema = `
-    Node properties are the following:
-    ${JSON.stringify(nodeProperties?.map((el) => el.output))}
-
-    Relationship properties are the following:
-    ${JSON.stringify(relationshipsProperties?.map((el) => el.output))}
-
-    The relationships are the following:
-    ${JSON.stringify(relationships?.map((el) => el.output))}
+      RETURN {start: label, type: property, end: toString(other_node)} AS output
       `;
+        // Assuming query method is defined and returns a Promise
+        const nodeProperties = (await this.query(nodePropertiesQuery))?.map((el) => el.output);
+        const relationshipsProperties = (await this.query(relPropertiesQuery))?.map((el) => el.output);
+        const relationships = (await this.query(relQuery))?.map((el) => el.output);
+        // Structured schema similar to Python's dictionary comprehension
+        this.structuredSchema = {
+            nodeProps: Object.fromEntries(nodeProperties?.map((el) => [el.labels, el.properties]) || []),
+            relProps: Object.fromEntries(relationshipsProperties?.map((el) => [el.type, el.properties]) || []),
+            relationships: relationships || [],
+        };
+        // Format node properties
+        const formattedNodeProps = nodeProperties?.map((el) => {
+            const propsStr = el.properties
+                .map((prop) => `${prop.property}: ${prop.type}`)
+                .join(", ");
+            return `${el.labels} {${propsStr}}`;
+        });
+        // Format relationship properties
+        const formattedRelProps = relationshipsProperties?.map((el) => {
+            const propsStr = el.properties
+                .map((prop) => `${prop.property}: ${prop.type}`)
+                .join(", ");
+            return `${el.type} {${propsStr}}`;
+        });
+        // Format relationships
+        const formattedRels = relationships?.map((el) => `(:${el.start})-[:${el.type}]->(:${el.end})`);
+        // Combine all formatted elements into a single string
+        this.schema = [
+            "Node properties are the following:",
+            formattedNodeProps?.join(", "),
+            "Relationship properties are the following:",
+            formattedRelProps?.join(", "),
+            "The relationships are the following:",
+            formattedRels?.join(", "),
+        ].join("\n");
     }
     async close() {
         await this.driver.close();
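The net effect of the Neo4jGraph changes above: refreshSchema() now builds both the prompt-ready schema string returned by getSchema() and a machine-readable object returned by the new getStructuredSchema(). A minimal usage sketch (connection details are placeholders, and it assumes initialize() refreshes the schema before returning, as in earlier releases):

import { Neo4jGraph } from "langchain/graphs/neo4j_graph";

const graph = await Neo4jGraph.initialize({
  url: "bolt://localhost:7687", // placeholder connection details
  username: "neo4j",
  password: "password",
});
const schemaText = graph.getSchema(); // formatted string for LLM prompts
const structured = graph.getStructuredSchema(); // { nodeProps, relProps, relationships }
// e.g. structured.relationships -> [{ start: "Actor", type: "ACTED_IN", end: "Movie" }]
await graph.close();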
package/dist/llms/gradient_ai.cjs
ADDED
@@ -0,0 +1,98 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GradientLLM = void 0;
+const nodejs_sdk_1 = require("@gradientai/nodejs-sdk");
+const base_js_1 = require("./base.cjs");
+const env_js_1 = require("../util/env.cjs");
+/**
+ * The GradientLLM class is used to interact with Gradient AI inference Endpoint models.
+ * This requires your Gradient AI Access Token which is autoloaded if not specified.
+ */
+class GradientLLM extends base_js_1.LLM {
+    static lc_name() {
+        return "GradientLLM";
+    }
+    get lc_secrets() {
+        return {
+            gradientAccessKey: "GRADIENT_ACCESS_TOKEN",
+            workspaceId: "GRADIENT_WORKSPACE_ID",
+        };
+    }
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "modelSlug", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "llama2-7b-chat"
+        });
+        Object.defineProperty(this, "gradientAccessKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "workspaceId", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "inferenceParameters", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        // Gradient AI does not export the BaseModel type. Once it does, we can use it here.
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        Object.defineProperty(this, "baseModel", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.modelSlug = fields?.modelSlug ?? this.modelSlug;
+        this.gradientAccessKey =
+            fields?.gradientAccessKey ??
+                (0, env_js_1.getEnvironmentVariable)("GRADIENT_ACCESS_TOKEN");
+        this.workspaceId =
+            fields?.workspaceId ?? (0, env_js_1.getEnvironmentVariable)("GRADIENT_WORKSPACE_ID");
+        this.inferenceParameters = fields.inferenceParameters;
+        if (!this.gradientAccessKey) {
+            throw new Error("Missing Gradient AI Access Token");
+        }
+        if (!this.workspaceId) {
+            throw new Error("Missing Gradient AI Workspace ID");
+        }
+    }
+    _llmType() {
+        return "gradient_ai";
+    }
+    /**
+     * Calls the Gradient AI endpoint and retrieves the result.
+     * @param {string} prompt The input prompt.
+     * @returns {Promise<string>} A promise that resolves to the generated string.
+     */
+    /** @ignore */
+    async _call(prompt, _options) {
+        await this.setBaseModel();
+        const response = (await this.caller.call(async () => this.baseModel.complete({
+            query: prompt,
+            ...this.inferenceParameters,
+        })));
+        return response.generatedOutput;
+    }
+    async setBaseModel() {
+        if (this.baseModel)
+            return;
+        const gradient = new nodejs_sdk_1.Gradient({
+            accessToken: this.gradientAccessKey,
+            workspaceId: this.workspaceId,
+        });
+        this.baseModel = await gradient.getBaseModel({
+            baseModelSlug: this.modelSlug,
+        });
+    }
+}
+exports.GradientLLM = GradientLLM;
package/dist/llms/gradient_ai.d.ts
ADDED
@@ -0,0 +1,50 @@
+import { BaseLLMCallOptions, BaseLLMParams, LLM } from "./base.js";
+/**
+ * The GradientLLMParams interface defines the input parameters for
+ * the GradientLLM class.
+ */
+export interface GradientLLMParams extends BaseLLMParams {
+    /**
+     * Gradient AI Access Token.
+     * Provide Access Token if you do not wish to automatically pull from env.
+     */
+    gradientAccessKey?: string;
+    /**
+     * Gradient Workspace Id.
+     * Provide workspace id if you do not wish to automatically pull from env.
+     */
+    workspaceId?: string;
+    /**
+     * Parameters accepted by the Gradient npm package.
+     */
+    inferenceParameters?: Record<string, unknown>;
+    /**
+     * Gradient AI Model Slug.
+     */
+    modelSlug?: string;
+}
+/**
+ * The GradientLLM class is used to interact with Gradient AI inference Endpoint models.
+ * This requires your Gradient AI Access Token which is autoloaded if not specified.
+ */
+export declare class GradientLLM extends LLM<BaseLLMCallOptions> {
+    static lc_name(): string;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    modelSlug: string;
+    gradientAccessKey?: string;
+    workspaceId?: string;
+    inferenceParameters?: Record<string, unknown>;
+    baseModel: any;
+    constructor(fields: GradientLLMParams);
+    _llmType(): string;
+    /**
+     * Calls the Gradient AI endpoint and retrieves the result.
+     * @param {string} prompt The input prompt.
+     * @returns {Promise<string>} A promise that resolves to the generated string.
+     */
+    /** @ignore */
+    _call(prompt: string, _options: this["ParsedCallOptions"]): Promise<string>;
+    setBaseModel(): Promise<void>;
+}
package/dist/llms/gradient_ai.js
ADDED
@@ -0,0 +1,94 @@
+import { Gradient } from "@gradientai/nodejs-sdk";
+import { LLM } from "./base.js";
+import { getEnvironmentVariable } from "../util/env.js";
+/**
+ * The GradientLLM class is used to interact with Gradient AI inference Endpoint models.
+ * This requires your Gradient AI Access Token which is autoloaded if not specified.
+ */
+export class GradientLLM extends LLM {
+    static lc_name() {
+        return "GradientLLM";
+    }
+    get lc_secrets() {
+        return {
+            gradientAccessKey: "GRADIENT_ACCESS_TOKEN",
+            workspaceId: "GRADIENT_WORKSPACE_ID",
+        };
+    }
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "modelSlug", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "llama2-7b-chat"
+        });
+        Object.defineProperty(this, "gradientAccessKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "workspaceId", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "inferenceParameters", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        // Gradient AI does not export the BaseModel type. Once it does, we can use it here.
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        Object.defineProperty(this, "baseModel", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.modelSlug = fields?.modelSlug ?? this.modelSlug;
+        this.gradientAccessKey =
+            fields?.gradientAccessKey ??
+                getEnvironmentVariable("GRADIENT_ACCESS_TOKEN");
+        this.workspaceId =
+            fields?.workspaceId ?? getEnvironmentVariable("GRADIENT_WORKSPACE_ID");
+        this.inferenceParameters = fields.inferenceParameters;
+        if (!this.gradientAccessKey) {
+            throw new Error("Missing Gradient AI Access Token");
+        }
+        if (!this.workspaceId) {
+            throw new Error("Missing Gradient AI Workspace ID");
+        }
+    }
+    _llmType() {
+        return "gradient_ai";
+    }
+    /**
+     * Calls the Gradient AI endpoint and retrieves the result.
+     * @param {string} prompt The input prompt.
+     * @returns {Promise<string>} A promise that resolves to the generated string.
+     */
+    /** @ignore */
+    async _call(prompt, _options) {
+        await this.setBaseModel();
+        const response = (await this.caller.call(async () => this.baseModel.complete({
+            query: prompt,
+            ...this.inferenceParameters,
+        })));
+        return response.generatedOutput;
+    }
+    async setBaseModel() {
+        if (this.baseModel)
+            return;
+        const gradient = new Gradient({
+            accessToken: this.gradientAccessKey,
+            workspaceId: this.workspaceId,
+        });
+        this.baseModel = await gradient.getBaseModel({
+            baseModelSlug: this.modelSlug,
+        });
+    }
+}
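The three gradient_ai files above add a new GradientLLM integration backed by the @gradientai/nodejs-sdk package. A minimal usage sketch, assuming the langchain/llms/gradient_ai entrypoint registered in this release's package.json; the inferenceParameters key below is an illustrative Gradient SDK option, not confirmed by this diff:

import { GradientLLM } from "langchain/llms/gradient_ai";

// Credentials fall back to the GRADIENT_ACCESS_TOKEN / GRADIENT_WORKSPACE_ID env variables.
const model = new GradientLLM({
  modelSlug: "llama2-7b-chat", // the constructor's default slug
  inferenceParameters: { maxGeneratedTokenCount: 20 }, // assumed SDK parameter name
});
const res = await model.call("What would be a good company name for a colorful sock maker?");
console.log(res);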
package/dist/llms/hf.cjs
CHANGED
@@ -76,6 +76,12 @@ class HuggingFaceInference extends base_js_1.LLM {
             writable: true,
             value: undefined
         });
+        Object.defineProperty(this, "includeCredentials", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
         this.model = fields?.model ?? this.model;
         this.temperature = fields?.temperature ?? this.temperature;
         this.maxTokens = fields?.maxTokens ?? this.maxTokens;
@@ -85,6 +91,7 @@ class HuggingFaceInference extends base_js_1.LLM {
         this.apiKey =
             fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("HUGGINGFACEHUB_API_KEY");
         this.endpointUrl = fields?.endpointUrl;
+        this.includeCredentials = fields?.includeCredentials;
         if (!this.apiKey) {
             throw new Error("Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.");
         }
@@ -96,8 +103,12 @@ class HuggingFaceInference extends base_js_1.LLM {
     async _call(prompt, options) {
         const { HfInference } = await HuggingFaceInference.imports();
         const hf = this.endpointUrl
-            ? new HfInference(this.apiKey
-
+            ? new HfInference(this.apiKey, {
+                includeCredentials: this.includeCredentials,
+            }).endpoint(this.endpointUrl)
+            : new HfInference(this.apiKey, {
+                includeCredentials: this.includeCredentials,
+            });
         const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), {
             model: this.model,
             parameters: {
package/dist/llms/hf.d.ts
CHANGED
@@ -22,6 +22,10 @@ export interface HFInput {
     frequencyPenalty?: number;
     /** API key to use. */
     apiKey?: string;
+    /**
+     * Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all.
+     */
+    includeCredentials?: string | boolean;
 }
 /**
  * Class implementing the Large Language Model (LLM) interface using the
@@ -52,6 +56,7 @@ export declare class HuggingFaceInference extends LLM implements HFInput {
     frequencyPenalty: number | undefined;
     apiKey: string | undefined;
     endpointUrl: string | undefined;
+    includeCredentials: string | boolean | undefined;
     constructor(fields?: Partial<HFInput> & BaseLLMParams);
     _llmType(): string;
     /** @ignore */
package/dist/llms/hf.js
CHANGED
@@ -73,6 +73,12 @@ export class HuggingFaceInference extends LLM {
             writable: true,
             value: undefined
         });
+        Object.defineProperty(this, "includeCredentials", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
         this.model = fields?.model ?? this.model;
         this.temperature = fields?.temperature ?? this.temperature;
         this.maxTokens = fields?.maxTokens ?? this.maxTokens;
@@ -82,6 +88,7 @@ export class HuggingFaceInference extends LLM {
         this.apiKey =
             fields?.apiKey ?? getEnvironmentVariable("HUGGINGFACEHUB_API_KEY");
         this.endpointUrl = fields?.endpointUrl;
+        this.includeCredentials = fields?.includeCredentials;
         if (!this.apiKey) {
             throw new Error("Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.");
         }
@@ -93,8 +100,12 @@ export class HuggingFaceInference extends LLM {
     async _call(prompt, options) {
         const { HfInference } = await HuggingFaceInference.imports();
         const hf = this.endpointUrl
-            ? new HfInference(this.apiKey
-
+            ? new HfInference(this.apiKey, {
+                includeCredentials: this.includeCredentials,
+            }).endpoint(this.endpointUrl)
+            : new HfInference(this.apiKey, {
+                includeCredentials: this.includeCredentials,
+            });
         const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), {
             model: this.model,
             parameters: {
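The hf.* changes above thread a new includeCredentials option through to the HfInference client, which is mainly useful for self-hosted inference endpoints behind cookie-based authentication. A minimal sketch with a placeholder endpoint URL (the API key is auto-loaded from HUGGINGFACEHUB_API_KEY if omitted):

import { HuggingFaceInference } from "langchain/llms/hf";

const model = new HuggingFaceInference({
  endpointUrl: "https://example.endpoints.huggingface.cloud", // placeholder endpoint
  includeCredentials: true, // per the doc comment above, true maps to credentials: "include"
});
const res = await model.call("1 + 1 =");
console.log(res);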
package/dist/llms/llama_cpp.cjs
CHANGED
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.LlamaCpp = void 0;
 const llama_cpp_js_1 = require("../util/llama_cpp.cjs");
 const base_js_1 = require("./base.cjs");
+const index_js_1 = require("../schema/index.cjs");
 /**
  * To use this model you need to have the `node-llama-cpp` module installed.
  * This can be installed using `npm install -S node-llama-cpp` and the minimum
@@ -76,9 +77,7 @@ class LlamaCpp extends base_js_1.LLM {
         return "llama2_cpp";
     }
     /** @ignore */
-    async _call(prompt,
-    // @ts-expect-error - TS6133: 'options' is declared but its value is never read.
-    options) {
+    async _call(prompt, _options) {
         try {
             const promptOptions = {
                 maxTokens: this?.maxTokens,
@@ -94,5 +93,20 @@ class LlamaCpp extends base_js_1.LLM {
             throw new Error("Error getting prompt completion.");
         }
     }
+    async *_streamResponseChunks(prompt, _options, runManager) {
+        const promptOptions = {
+            temperature: this?.temperature,
+            topK: this?.topK,
+            topP: this?.topP,
+        };
+        const stream = await this.caller.call(async () => this._context.evaluate(this._context.encode(prompt), promptOptions));
+        for await (const chunk of stream) {
+            yield new index_js_1.GenerationChunk({
+                text: this._context.decode([chunk]),
+                generationInfo: {},
+            });
+            await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
+        }
+    }
 }
 exports.LlamaCpp = LlamaCpp;
package/dist/llms/llama_cpp.d.ts
CHANGED
@@ -1,6 +1,8 @@
 import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
 import { LlamaBaseCppInputs } from "../util/llama_cpp.js";
 import { LLM, BaseLLMCallOptions, BaseLLMParams } from "./base.js";
+import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
+import { GenerationChunk } from "../schema/index.js";
 /**
  * Note that the modelPath is the only required parameter. For testing you
  * can set this in the environment variable `LLAMA_PATH`.
@@ -34,5 +36,6 @@ export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
     constructor(inputs: LlamaCppInputs);
     _llmType(): string;
     /** @ignore */
-    _call(prompt: string,
+    _call(prompt: string, _options?: this["ParsedCallOptions"]): Promise<string>;
+    _streamResponseChunks(prompt: string, _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
 }
package/dist/llms/llama_cpp.js
CHANGED
@@ -1,5 +1,6 @@
 import { createLlamaModel, createLlamaContext, createLlamaSession, } from "../util/llama_cpp.js";
 import { LLM } from "./base.js";
+import { GenerationChunk } from "../schema/index.js";
 /**
  * To use this model you need to have the `node-llama-cpp` module installed.
  * This can be installed using `npm install -S node-llama-cpp` and the minimum
@@ -73,9 +74,7 @@ export class LlamaCpp extends LLM {
         return "llama2_cpp";
     }
     /** @ignore */
-    async _call(prompt,
-    // @ts-expect-error - TS6133: 'options' is declared but its value is never read.
-    options) {
+    async _call(prompt, _options) {
         try {
             const promptOptions = {
                 maxTokens: this?.maxTokens,
@@ -91,4 +90,19 @@ export class LlamaCpp extends LLM {
             throw new Error("Error getting prompt completion.");
         }
     }
+    async *_streamResponseChunks(prompt, _options, runManager) {
+        const promptOptions = {
+            temperature: this?.temperature,
+            topK: this?.topK,
+            topP: this?.topP,
+        };
+        const stream = await this.caller.call(async () => this._context.evaluate(this._context.encode(prompt), promptOptions));
+        for await (const chunk of stream) {
+            yield new GenerationChunk({
+                text: this._context.decode([chunk]),
+                generationInfo: {},
+            });
+            await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
+        }
+    }
 }
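With _streamResponseChunks implemented, LlamaCpp gains token streaming alongside the existing _call path. A minimal sketch, assuming the standard Runnable .stream() interface available on LLMs in this release and a placeholder local model path:

import { LlamaCpp } from "langchain/llms/llama_cpp";

const model = new LlamaCpp({
  modelPath: "/path/to/model.gguf", // placeholder; for testing, LLAMA_PATH can hold this
  temperature: 0.7,
});
const stream = await model.stream("Tell me a short story about a happy llama.");
for await (const chunk of stream) {
  process.stdout.write(chunk); // one decoded token per chunk
}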