langchain 0.0.174 → 0.0.176
This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- package/dist/chat_models/googlevertexai/common.cjs +46 -7
- package/dist/chat_models/googlevertexai/common.d.ts +7 -2
- package/dist/chat_models/googlevertexai/common.js +47 -8
- package/dist/chat_models/googlevertexai/index.cjs +4 -3
- package/dist/chat_models/googlevertexai/index.js +4 -3
- package/dist/chat_models/googlevertexai/web.cjs +2 -1
- package/dist/chat_models/googlevertexai/web.js +2 -1
- package/dist/embeddings/googlevertexai.cjs +1 -1
- package/dist/embeddings/googlevertexai.js +1 -1
- package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts +2 -2
- package/dist/experimental/multimodal_embeddings/googlevertexai.cjs +1 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts +2 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.js +2 -2
- package/dist/experimental/plan_and_execute/agent_executor.cjs +7 -4
- package/dist/experimental/plan_and_execute/agent_executor.d.ts +4 -3
- package/dist/experimental/plan_and_execute/agent_executor.js +8 -5
- package/dist/experimental/plan_and_execute/prompt.cjs +25 -9
- package/dist/experimental/plan_and_execute/prompt.d.ts +9 -1
- package/dist/experimental/plan_and_execute/prompt.js +23 -8
- package/dist/llms/googlevertexai/common.cjs +46 -13
- package/dist/llms/googlevertexai/common.d.ts +8 -3
- package/dist/llms/googlevertexai/common.js +46 -13
- package/dist/llms/googlevertexai/index.cjs +4 -3
- package/dist/llms/googlevertexai/index.js +4 -3
- package/dist/llms/googlevertexai/web.cjs +2 -1
- package/dist/llms/googlevertexai/web.js +2 -1
- package/dist/load/import_constants.cjs +4 -0
- package/dist/load/import_constants.js +4 -0
- package/dist/storage/convex.cjs +145 -0
- package/dist/storage/convex.d.ts +85 -0
- package/dist/storage/convex.js +141 -0
- package/dist/stores/message/convex.cjs +120 -0
- package/dist/stores/message/convex.d.ts +60 -0
- package/dist/stores/message/convex.js +116 -0
- package/dist/types/googlevertexai-types.d.ts +12 -10
- package/dist/util/convex.cjs +77 -0
- package/dist/util/convex.d.ts +26 -0
- package/dist/util/convex.js +74 -0
- package/dist/util/googlevertexai-connection.cjs +298 -10
- package/dist/util/googlevertexai-connection.d.ts +76 -7
- package/dist/util/googlevertexai-connection.js +294 -9
- package/dist/util/googlevertexai-gauth.cjs +36 -0
- package/dist/util/googlevertexai-gauth.d.ts +8 -0
- package/dist/util/googlevertexai-gauth.js +32 -0
- package/dist/util/googlevertexai-webauth.cjs +38 -2
- package/dist/util/googlevertexai-webauth.d.ts +2 -6
- package/dist/util/googlevertexai-webauth.js +38 -2
- package/dist/vectorstores/convex.cjs +177 -0
- package/dist/vectorstores/convex.d.ts +113 -0
- package/dist/vectorstores/convex.js +173 -0
- package/dist/vectorstores/googlevertexai.d.ts +4 -4
- package/dist/vectorstores/milvus.cjs +4 -2
- package/dist/vectorstores/milvus.js +4 -2
- package/dist/vectorstores/vercel_postgres.cjs +29 -7
- package/dist/vectorstores/vercel_postgres.d.ts +1 -1
- package/dist/vectorstores/vercel_postgres.js +29 -7
- package/package.json +38 -1
- package/storage/convex.cjs +1 -0
- package/storage/convex.d.ts +1 -0
- package/storage/convex.js +1 -0
- package/stores/message/convex.cjs +1 -0
- package/stores/message/convex.d.ts +1 -0
- package/stores/message/convex.js +1 -0
- package/util/convex.cjs +1 -0
- package/util/convex.d.ts +1 -0
- package/util/convex.js +1 -0
- package/vectorstores/convex.cjs +1 -0
- package/vectorstores/convex.d.ts +1 -0
- package/vectorstores/convex.js +1 -0
package/dist/chat_models/googlevertexai/common.cjs

@@ -144,6 +144,12 @@ class BaseChatGoogleVertexAI extends base_js_1.BaseChatModel {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "streamedConnection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.model = fields?.model ?? this.model;
         this.temperature = fields?.temperature ?? this.temperature;
         this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;

@@ -155,15 +161,31 @@ class BaseChatGoogleVertexAI extends base_js_1.BaseChatModel {
         // TODO: Combine the safetyAttributes
         return [];
     }
-
+    async *_streamResponseChunks(_messages, _options, _runManager) {
+        // Make the call as a streaming request
+        const instance = this.createInstance(_messages);
+        const parameters = this.formatParameters();
+        const result = await this.streamedConnection.request([instance], parameters, _options);
+        // Get the streaming parser of the response
+        const stream = result.data;
+        // Loop until the end of the stream
+        // During the loop, yield each time we get a chunk from the streaming parser
+        // that is either available or added to the queue
+        while (!stream.streamDone) {
+            const output = await stream.nextChunk();
+            const chunk = output !== null
+                ? BaseChatGoogleVertexAI.convertPredictionChunk(output)
+                : new index_js_1.ChatGenerationChunk({
+                    text: "",
+                    message: new index_js_1.AIMessageChunk(""),
+                    generationInfo: { finishReason: "stop" },
+                });
+            yield chunk;
+        }
+    }
     async _generate(messages, options) {
         const instance = this.createInstance(messages);
-        const parameters = {
-            temperature: this.temperature,
-            topK: this.topK,
-            topP: this.topP,
-            maxOutputTokens: this.maxOutputTokens,
-        };
+        const parameters = this.formatParameters();
         const result = await this.connection.request([instance], parameters, options);
         const generations = result?.data?.predictions?.map((prediction) => BaseChatGoogleVertexAI.convertPrediction(prediction)) ?? [];
         return {

@@ -211,6 +233,14 @@ class BaseChatGoogleVertexAI extends base_js_1.BaseChatModel {
         };
         return instance;
     }
+    formatParameters() {
+        return {
+            temperature: this.temperature,
+            topK: this.topK,
+            topP: this.topP,
+            maxOutputTokens: this.maxOutputTokens,
+        };
+    }
     /**
      * Converts a prediction from the Google Vertex AI chat model to a chat
      * generation.

@@ -225,5 +255,14 @@ class BaseChatGoogleVertexAI extends base_js_1.BaseChatModel {
             generationInfo: prediction,
         };
     }
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    static convertPredictionChunk(output) {
+        const generation = BaseChatGoogleVertexAI.convertPrediction(output.outputs[0]);
+        return new index_js_1.ChatGenerationChunk({
+            text: generation.text,
+            message: new index_js_1.AIMessageChunk(generation.message),
+            generationInfo: generation.generationInfo,
+        });
+    }
 }
 exports.BaseChatGoogleVertexAI = BaseChatGoogleVertexAI;
package/dist/chat_models/googlevertexai/common.d.ts

@@ -1,8 +1,9 @@
 import { BaseChatModel } from "../base.js";
-import { BaseMessage, ChatGeneration, ChatMessage, ChatResult, LLMResult } from "../../schema/index.js";
+import { BaseMessage, ChatGeneration, ChatGenerationChunk, ChatMessage, ChatResult, LLMResult } from "../../schema/index.js";
 import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
-import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction } from "../../types/googlevertexai-types.js";
+import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAIModelParams } from "../../types/googlevertexai-types.js";
 import { BaseLanguageModelCallOptions } from "../../base_language/index.js";
+import { CallbackManagerForLLMRun } from "../../callbacks/index.js";
 /**
  * Represents a single "example" exchange that can be provided to
  * help illustrate what a model response should look like.

@@ -96,9 +97,11 @@ export declare class BaseChatGoogleVertexAI<AuthOptions> extends BaseChatModel i
     topK: number;
     examples: ChatExample[];
     connection: GoogleVertexAILLMConnection<BaseLanguageModelCallOptions, GoogleVertexAIChatInstance, GoogleVertexAIChatPrediction, AuthOptions>;
+    streamedConnection: GoogleVertexAILLMConnection<BaseLanguageModelCallOptions, GoogleVertexAIChatInstance, GoogleVertexAIChatPrediction, AuthOptions>;
     get lc_aliases(): Record<string, string>;
     constructor(fields?: GoogleVertexAIChatInput<AuthOptions>);
     _combineLLMOutput(): LLMResult["llmOutput"];
+    _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
     _generate(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<ChatResult>;
     _llmType(): string;
     /**

@@ -107,6 +110,7 @@ export declare class BaseChatGoogleVertexAI<AuthOptions> extends BaseChatModel i
      * @returns A new instance of the Google Vertex AI chat model.
      */
     createInstance(messages: BaseMessage[]): GoogleVertexAIChatInstance;
+    formatParameters(): GoogleVertexAIModelParams;
     /**
      * Converts a prediction from the Google Vertex AI chat model to a chat
      * generation.

@@ -114,5 +118,6 @@ export declare class BaseChatGoogleVertexAI<AuthOptions> extends BaseChatModel i
      * @returns The converted chat generation.
      */
     static convertPrediction(prediction: GoogleVertexAIChatPrediction): ChatGeneration;
+    static convertPredictionChunk(output: any): ChatGenerationChunk;
 }
 export {};
package/dist/chat_models/googlevertexai/common.js

@@ -1,5 +1,5 @@
 import { BaseChatModel } from "../base.js";
-import { AIMessage, ChatMessage, } from "../../schema/index.js";
+import { AIMessage, AIMessageChunk, ChatGenerationChunk, ChatMessage, } from "../../schema/index.js";
 /**
  * Represents a chat message in the Google Vertex AI chat model.
  */

@@ -140,6 +140,12 @@ export class BaseChatGoogleVertexAI extends BaseChatModel {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "streamedConnection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.model = fields?.model ?? this.model;
         this.temperature = fields?.temperature ?? this.temperature;
         this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;

@@ -151,15 +157,31 @@ export class BaseChatGoogleVertexAI extends BaseChatModel {
         // TODO: Combine the safetyAttributes
         return [];
     }
-
+    async *_streamResponseChunks(_messages, _options, _runManager) {
+        // Make the call as a streaming request
+        const instance = this.createInstance(_messages);
+        const parameters = this.formatParameters();
+        const result = await this.streamedConnection.request([instance], parameters, _options);
+        // Get the streaming parser of the response
+        const stream = result.data;
+        // Loop until the end of the stream
+        // During the loop, yield each time we get a chunk from the streaming parser
+        // that is either available or added to the queue
+        while (!stream.streamDone) {
+            const output = await stream.nextChunk();
+            const chunk = output !== null
+                ? BaseChatGoogleVertexAI.convertPredictionChunk(output)
+                : new ChatGenerationChunk({
+                    text: "",
+                    message: new AIMessageChunk(""),
+                    generationInfo: { finishReason: "stop" },
+                });
+            yield chunk;
+        }
+    }
     async _generate(messages, options) {
         const instance = this.createInstance(messages);
-        const parameters = {
-            temperature: this.temperature,
-            topK: this.topK,
-            topP: this.topP,
-            maxOutputTokens: this.maxOutputTokens,
-        };
+        const parameters = this.formatParameters();
         const result = await this.connection.request([instance], parameters, options);
         const generations = result?.data?.predictions?.map((prediction) => BaseChatGoogleVertexAI.convertPrediction(prediction)) ?? [];
         return {

@@ -207,6 +229,14 @@ export class BaseChatGoogleVertexAI extends BaseChatModel {
         };
         return instance;
     }
+    formatParameters() {
+        return {
+            temperature: this.temperature,
+            topK: this.topK,
+            topP: this.topP,
+            maxOutputTokens: this.maxOutputTokens,
+        };
+    }
     /**
      * Converts a prediction from the Google Vertex AI chat model to a chat
      * generation.

@@ -221,4 +251,13 @@ export class BaseChatGoogleVertexAI extends BaseChatModel {
             generationInfo: prediction,
         };
     }
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    static convertPredictionChunk(output) {
+        const generation = BaseChatGoogleVertexAI.convertPrediction(output.outputs[0]);
+        return new ChatGenerationChunk({
+            text: generation.text,
+            message: new AIMessageChunk(generation.message),
+            generationInfo: generation.generationInfo,
+        });
+    }
 }
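With the second, streaming connection in place, the chat model can be consumed through the standard streaming interface. A minimal sketch of the new capability, assuming the usual .stream() method exposed by chat models in this version (model settings and prompt are illustrative):

    import { ChatGoogleVertexAI } from "langchain/chat_models/googlevertexai";

    const model = new ChatGoogleVertexAI({ temperature: 0.7 });
    // .stream() drives _streamResponseChunks, which polls the streaming
    // parser via nextChunk() until streamDone is set
    const stream = await model.stream("Write a short poem about the ocean.");
    for await (const chunk of stream) {
      console.log(chunk.content);
    }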
package/dist/chat_models/googlevertexai/index.cjs

@@ -1,9 +1,9 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ChatGoogleVertexAI = void 0;
-const google_auth_library_1 = require("google-auth-library");
 const common_js_1 = require("./common.cjs");
 const googlevertexai_connection_js_1 = require("../../util/googlevertexai-connection.cjs");
+const googlevertexai_gauth_js_1 = require("../../util/googlevertexai-gauth.cjs");
 /**
  * Enables calls to the Google Cloud's Vertex AI API to access
  * Large Language Models in a chat-like fashion.

@@ -24,11 +24,12 @@ class ChatGoogleVertexAI extends common_js_1.BaseChatGoogleVertexAI {
     }
     constructor(fields) {
         super(fields);
-        const client = new google_auth_library_1.GoogleAuth({
+        const client = new googlevertexai_gauth_js_1.GAuthClient({
             scopes: "https://www.googleapis.com/auth/cloud-platform",
             ...fields?.authOptions,
         });
-        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }
 exports.ChatGoogleVertexAI = ChatGoogleVertexAI;
package/dist/chat_models/googlevertexai/index.js

@@ -1,6 +1,6 @@
-import { GoogleAuth } from "google-auth-library";
 import { BaseChatGoogleVertexAI } from "./common.js";
 import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
+import { GAuthClient } from "../../util/googlevertexai-gauth.js";
 /**
  * Enables calls to the Google Cloud's Vertex AI API to access
  * Large Language Models in a chat-like fashion.

@@ -21,10 +21,11 @@ export class ChatGoogleVertexAI extends BaseChatGoogleVertexAI {
     }
     constructor(fields) {
         super(fields);
-        const client = new GoogleAuth({
+        const client = new GAuthClient({
             scopes: "https://www.googleapis.com/auth/cloud-platform",
             ...fields?.authOptions,
         });
-        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }
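The swap from a direct google-auth-library GoogleAuth client to the new GAuthClient wrapper is internal; callers keep passing the same authOptions, which are spread into the wrapper. A hypothetical configuration sketch (field values are illustrative):

    import { ChatGoogleVertexAI } from "langchain/chat_models/googlevertexai";

    const model = new ChatGoogleVertexAI({
      authOptions: {
        // any google-auth-library GoogleAuthOptions fields are accepted
        projectId: "my-gcp-project",
        keyFilename: "/path/to/service-account.json",
      },
    });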
package/dist/chat_models/googlevertexai/web.cjs

@@ -25,7 +25,8 @@ class ChatGoogleVertexAI extends common_js_1.BaseChatGoogleVertexAI {
     constructor(fields) {
         super(fields);
         const client = new googlevertexai_webauth_js_1.WebGoogleAuth(fields?.authOptions);
-        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }
 exports.ChatGoogleVertexAI = ChatGoogleVertexAI;
package/dist/chat_models/googlevertexai/web.js

@@ -22,6 +22,7 @@ export class ChatGoogleVertexAI extends BaseChatGoogleVertexAI {
     constructor(fields) {
         super(fields);
         const client = new WebGoogleAuth(fields?.authOptions);
-        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }
package/dist/embeddings/googlevertexai.cjs

@@ -56,7 +56,7 @@ class GoogleVertexAIEmbeddings extends base_js_1.Embeddings {
         const options = {};
         const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
         const result = responses
-            ?.map((response) => response.data.predictions.map((result) => result.embeddings.values))
+            ?.map((response) => response?.data?.predictions?.map((result) => result.embeddings.values) ?? [])
             .flat() ?? [];
         return result;
     }
package/dist/embeddings/googlevertexai.js

@@ -53,7 +53,7 @@ export class GoogleVertexAIEmbeddings extends Embeddings {
         const options = {};
         const responses = await Promise.all(instanceChunks.map((instances) => this.connection.request(instances, parameters, options)));
         const result = responses
-            ?.map((response) => response.data.predictions.map((result) => result.embeddings.values))
+            ?.map((response) => response?.data?.predictions?.map((result) => result.embeddings.values) ?? [])
            .flat() ?? [];
         return result;
     }
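The embeddings change only hardens how batched responses are mapped (each response's predictions are now optionally chained and default to an empty array), so call sites are unchanged. A quick usage sketch:

    import { GoogleVertexAIEmbeddings } from "langchain/embeddings/googlevertexai";

    const embeddings = new GoogleVertexAIEmbeddings();
    // Responses without predictions now contribute [] instead of throwing
    const vectors = await embeddings.embedDocuments(["hello", "world"]);
    console.log(vectors.length); // 2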
package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts

@@ -4,7 +4,7 @@ import { GoogleAuthOptions } from "google-auth-library";
 import { PromptTemplate } from "../../../prompts/index.js";
 import { BaseLanguageModel } from "../../../base_language/index.js";
 import { AsyncCaller, AsyncCallerCallOptions } from "../../../util/async_caller.js";
-import { GoogleResponse, GoogleVertexAIConnectionParams } from "../../../types/googlevertexai-types.js";
+import { GoogleAbstractedClientOpsMethod, GoogleResponse, GoogleVertexAIConnectionParams } from "../../../types/googlevertexai-types.js";
 import { GoogleConnection } from "../../../util/googlevertexai-connection.js";
 /**
  * Configuration that allows us to load or pull a prompt that has been created

@@ -134,7 +134,7 @@ export declare class DriveFileReadConnection extends GoogleConnection<DriveCallO
     fileId: string;
     constructor(fields: DriveFileReadParams, caller: AsyncCaller);
     buildUrl(): Promise<string>;
-    buildMethod():
+    buildMethod(): GoogleAbstractedClientOpsMethod;
     request(options?: DriveCallOptions): Promise<DriveFileMakerSuiteResponse>;
 }
 export interface CacheEntry {
package/dist/experimental/multimodal_embeddings/googlevertexai.cjs

@@ -54,7 +54,7 @@ class GoogleVertexAIMultimodalEmbeddings extends base_js_1.Embeddings {
      * @returns An array of media embeddings.
      */
     responseToEmbeddings(response) {
-        return response.data.predictions.map((r) => ({
+        return (response?.data).predictions.map((r) => ({
             text: r.textEmbedding,
             image: r.imageEmbedding,
         }));
package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts

@@ -1,7 +1,8 @@
 /// <reference types="node" resolution-mode="require"/>
 import { GoogleAuthOptions } from "google-auth-library";
 import { Embeddings, EmbeddingsParams } from "../../embeddings/base.js";
-import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAILLMResponse } from "../../types/googlevertexai-types.js";
+import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction } from "../../types/googlevertexai-types.js";
+import { GoogleVertexAILLMResponse } from "../../util/googlevertexai-connection.js";
 /**
  * Parameters for the GoogleVertexAIMultimodalEmbeddings class, extending
  * both EmbeddingsParams and GoogleVertexAIConnectionParams.
package/dist/experimental/multimodal_embeddings/googlevertexai.js

@@ -1,6 +1,6 @@
 import { GoogleAuth } from "google-auth-library";
 import { Embeddings } from "../../embeddings/base.js";
-import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
+import { GoogleVertexAILLMConnection, } from "../../util/googlevertexai-connection.js";
 /**
  * Class for generating embeddings for text and images using Google's
  * Vertex AI. It extends the Embeddings base class and implements the

@@ -51,7 +51,7 @@ export class GoogleVertexAIMultimodalEmbeddings extends Embeddings {
      * @returns An array of media embeddings.
      */
     responseToEmbeddings(response) {
-        return response.data.predictions.map((r) => ({
+        return (response?.data).predictions.map((r) => ({
             text: r.textEmbedding,
             image: r.imageEmbedding,
         }));
package/dist/experimental/plan_and_execute/agent_executor.cjs

@@ -69,10 +69,10 @@ class PlanAndExecuteAgentExecutor extends base_js_1.BaseChain {
      * @param llm The Large Language Model (LLM) used to generate responses.
      * @returns A new LLMPlanner instance.
      */
-    static getDefaultPlanner({ llm }) {
+    static async getDefaultPlanner({ llm, tools, }) {
         const plannerLlmChain = new llm_chain_js_1.LLMChain({
             llm,
-            prompt: prompt_js_1.PLANNER_CHAT_PROMPT,
+            prompt: await (0, prompt_js_1.getPlannerChatPrompt)(tools),
         });
         return new base_js_2.LLMPlanner(plannerLlmChain, new outputParser_js_1.PlanOutputParser());
     }

@@ -104,9 +104,12 @@ class PlanAndExecuteAgentExecutor extends base_js_1.BaseChain {
      * @param humanMessageTemplate The template for human messages. If not provided, a default template is used.
      * @returns A new PlanAndExecuteAgentExecutor instance.
      */
-    static fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
+    static async fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
         const executor = new PlanAndExecuteAgentExecutor({
-            planner: PlanAndExecuteAgentExecutor.getDefaultPlanner({ llm }),
+            planner: await PlanAndExecuteAgentExecutor.getDefaultPlanner({
+                llm,
+                tools,
+            }),
             stepExecutor: PlanAndExecuteAgentExecutor.getDefaultStepExecutor({
                 llm,
                 tools,
package/dist/experimental/plan_and_execute/agent_executor.d.ts

@@ -40,9 +40,10 @@ export declare class PlanAndExecuteAgentExecutor extends BaseChain {
     * @param llm The Large Language Model (LLM) used to generate responses.
     * @returns A new LLMPlanner instance.
     */
-    static getDefaultPlanner({ llm }: {
+    static getDefaultPlanner({ llm, tools, }: {
        llm: BaseLanguageModel;
-    }): LLMPlanner;
+        tools: Tool[];
+    }): Promise<LLMPlanner>;
    /**
     * Static method that returns a default step executor for the agent. It
     * creates a new ChatAgent from a given LLM and a set of tools, and uses

@@ -71,7 +72,7 @@ export declare class PlanAndExecuteAgentExecutor extends BaseChain {
        llm: BaseLanguageModel;
        tools: Tool[];
        humanMessageTemplate?: string;
-    } & Omit<PlanAndExecuteAgentExecutorInput, "planner" | "stepExecutor">): PlanAndExecuteAgentExecutor;
+    } & Omit<PlanAndExecuteAgentExecutorInput, "planner" | "stepExecutor">): Promise<PlanAndExecuteAgentExecutor>;
    /** @ignore */
    _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
    _chainType(): "agent_executor";
package/dist/experimental/plan_and_execute/agent_executor.js

@@ -1,7 +1,7 @@
 import { BaseChain } from "../../chains/base.js";
 import { ListStepContainer, LLMPlanner, ChainStepExecutor, } from "./base.js";
 import { AgentExecutor } from "../../agents/executor.js";
-import { DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE, PLANNER_CHAT_PROMPT, } from "./prompt.js";
+import { DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE, getPlannerChatPrompt, } from "./prompt.js";
 import { LLMChain } from "../../chains/llm_chain.js";
 import { PlanOutputParser } from "./outputParser.js";
 import { ChatAgent } from "../../agents/chat/index.js";

@@ -66,10 +66,10 @@ export class PlanAndExecuteAgentExecutor extends BaseChain {
     * @param llm The Large Language Model (LLM) used to generate responses.
     * @returns A new LLMPlanner instance.
     */
-    static getDefaultPlanner({ llm }) {
+    static async getDefaultPlanner({ llm, tools, }) {
        const plannerLlmChain = new LLMChain({
            llm,
-            prompt: PLANNER_CHAT_PROMPT,
+            prompt: await getPlannerChatPrompt(tools),
        });
        return new LLMPlanner(plannerLlmChain, new PlanOutputParser());
    }

@@ -101,9 +101,12 @@ export class PlanAndExecuteAgentExecutor extends BaseChain {
     * @param humanMessageTemplate The template for human messages. If not provided, a default template is used.
     * @returns A new PlanAndExecuteAgentExecutor instance.
     */
-    static fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
+    static async fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
        const executor = new PlanAndExecuteAgentExecutor({
-            planner: PlanAndExecuteAgentExecutor.getDefaultPlanner({ llm }),
+            planner: await PlanAndExecuteAgentExecutor.getDefaultPlanner({
+                llm,
+                tools,
+            }),
            stepExecutor: PlanAndExecuteAgentExecutor.getDefaultStepExecutor({
                llm,
                tools,
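Because getDefaultPlanner now builds its prompt from the tools, both it and fromLLMAndTools have become async, which is a breaking change for callers. A sketch of the updated call pattern (model and tool choices are illustrative):

    import { PlanAndExecuteAgentExecutor } from "langchain/experimental/plan_and_execute";
    import { ChatOpenAI } from "langchain/chat_models/openai";
    import { Calculator } from "langchain/tools/calculator";

    // fromLLMAndTools now returns a Promise and must be awaited
    const executor = await PlanAndExecuteAgentExecutor.fromLLMAndTools({
      llm: new ChatOpenAI({ temperature: 0 }),
      tools: [new Calculator()],
    });
    const result = await executor.call({ input: "What is 2 to the power of 10?" });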
package/dist/experimental/plan_and_execute/prompt.cjs

@@ -1,23 +1,22 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = exports.PLANNER_CHAT_PROMPT = exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = void 0;
+exports.getPlannerChatPrompt = exports.DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = void 0;
 const chat_js_1 = require("../../prompts/chat.cjs");
 exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `Let's first understand the problem and devise a plan to solve the problem.`,
     `Please output the plan starting with the header "Plan:"`,
-    `and then followed by a numbered list of steps.`,
+    `followed by a numbered list of steps.`,
     `Please make the plan the minimum number of steps required`,
     `to answer the query or complete the task accurately and precisely.`,
-    `If the task is a question, the final step in the plan must be the following:`,
-    `"Given the above steps taken,`,
+    `You have a set of tools at your disposal to help you with this task:`,
+    "",
+    "{toolStrings}",
+    "",
+    `You must consider these tools when coming up with your plan.`,
+    `If the task is a question, the final step in the plan must be the following: "Given the above steps taken,`,
     `please respond to the original query."`,
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
-exports.PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromMessages([
-    /* #__PURE__ */ chat_js_1.SystemMessagePromptTemplate.fromTemplate(exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
-    /* #__PURE__ */ chat_js_1.HumanMessagePromptTemplate.fromTemplate(`{input}`),
-]);
 exports.DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = `Previous steps: {previous_steps}

 Current objective: {current_step}

@@ -25,3 +24,20 @@ Current objective: {current_step}
 {agent_scratchpad}

 You may extract and combine relevant data from your previous steps when responding to me.`;
+/**
+ * Add the tool descriptions to the planning system prompt in
+ * order to get a better suited plan that makes efficient use
+ * of the tools
+ * @param tools the tools available to the `planner`
+ * @returns
+ */
+const getPlannerChatPrompt = async (tools) => {
+    const toolStrings = tools
+        .map((tool) => `${tool.name}: ${tool.description}`)
+        .join("\n");
+    return /* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromMessages([
+        chat_js_1.SystemMessagePromptTemplate.fromTemplate(exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
+        chat_js_1.HumanMessagePromptTemplate.fromTemplate(`{input}`),
+    ]).partial({ toolStrings });
+};
+exports.getPlannerChatPrompt = getPlannerChatPrompt;
package/dist/experimental/plan_and_execute/prompt.d.ts

@@ -1,4 +1,12 @@
 import { ChatPromptTemplate } from "../../prompts/chat.js";
+import { Tool } from "../../tools/base.js";
 export declare const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE: string;
-export declare const PLANNER_CHAT_PROMPT: ChatPromptTemplate<any, any>;
 export declare const DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = "Previous steps: {previous_steps}\n\nCurrent objective: {current_step}\n\n{agent_scratchpad}\n\nYou may extract and combine relevant data from your previous steps when responding to me.";
+/**
+ * Add the tool descriptions to the planning system prompt in
+ * order to get a better suited plan that makes efficient use
+ * of the tools
+ * @param tools the tools available to the `planner`
+ * @returns
+ */
+export declare const getPlannerChatPrompt: (tools: Tool[]) => Promise<ChatPromptTemplate<import("../../schema/index.js").InputValues<string>, any>>;
package/dist/experimental/plan_and_execute/prompt.js

@@ -2,19 +2,18 @@ import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemp
 export const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `Let's first understand the problem and devise a plan to solve the problem.`,
     `Please output the plan starting with the header "Plan:"`,
-    `and then followed by a numbered list of steps.`,
+    `followed by a numbered list of steps.`,
     `Please make the plan the minimum number of steps required`,
     `to answer the query or complete the task accurately and precisely.`,
-    `If the task is a question, the final step in the plan must be the following:`,
-    `"Given the above steps taken,`,
+    `You have a set of tools at your disposal to help you with this task:`,
+    "",
+    "{toolStrings}",
+    "",
+    `You must consider these tools when coming up with your plan.`,
+    `If the task is a question, the final step in the plan must be the following: "Given the above steps taken,`,
     `please respond to the original query."`,
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
-export const PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromMessages([
-    /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
-    /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(`{input}`),
-]);
 export const DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = `Previous steps: {previous_steps}

@@ -22,3 +21,19 @@ Current objective: {current_step}
 {agent_scratchpad}

 You may extract and combine relevant data from your previous steps when responding to me.`;
+/**
+ * Add the tool descriptions to the planning system prompt in
+ * order to get a better suited plan that makes efficient use
+ * of the tools
+ * @param tools the tools available to the `planner`
+ * @returns
+ */
+export const getPlannerChatPrompt = async (tools) => {
+    const toolStrings = tools
+        .map((tool) => `${tool.name}: ${tool.description}`)
+        .join("\n");
+    return /* #__PURE__ */ ChatPromptTemplate.fromMessages([
+        SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
+        HumanMessagePromptTemplate.fromTemplate(`{input}`),
+    ]).partial({ toolStrings });
+};
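To see what the {toolStrings} partial does, the generated planner prompt can be formatted directly. A sketch, assuming getPlannerChatPrompt can be imported from this module (the import path below is an assumption; the public export surface may differ):

    // assumed import path, for illustration only
    import { getPlannerChatPrompt } from "langchain/experimental/plan_and_execute/prompt";
    import { Calculator } from "langchain/tools/calculator";

    const prompt = await getPlannerChatPrompt([new Calculator()]);
    const messages = await prompt.formatMessages({ input: "How many seconds are in a day?" });
    // The system message now embeds each tool as "name: description"
    // where the {toolStrings} placeholder sits
    console.log(messages[0].content);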