@saltcorn/large-language-model 0.7.3 → 0.7.5
This diff shows the changes between the publicly released versions 0.7.3 and 0.7.5 of the package as they appear in the public registry, and is provided for informational purposes only.
- package/generate.js +20 -8
- package/index.js +34 -1
- package/package.json +1 -1
package/generate.js
CHANGED
@@ -274,7 +274,7 @@ const initOAuth2Client = async (config) => {

 const convertChatToVertex = (chat) => {
   const history = [];
-  for (const message of chat) {
+  for (const message of chat || []) {
     const role = message.role === "user" ? "user" : "model";
     if (message.content) {
       const parts = [{ text: message.content }];
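The only change in convertChatToVertex is the null guard on the loop, so a missing chat argument now yields an empty history instead of throwing. The standalone sketch below is not part of the package (names are illustrative); it reproduces the mapping the loop performs for plain text messages:

// Sketch of the conversion: OpenAI-style messages become Vertex history entries.
// Messages without a content field (e.g. pure tool calls) are handled separately
// in the real code and are simply skipped here.
const toVertexHistory = (chat) =>
  (chat || [])
    .filter((message) => message.content)
    .map((message) => ({
      role: message.role === "user" ? "user" : "model",
      parts: [{ text: message.content }],
    }));

console.log(
  toVertexHistory([
    { role: "user", content: "What is the capital of France?" },
    { role: "assistant", content: "Paris." },
  ])
);
// -> [ { role: "user", parts: [...] }, { role: "model", parts: [...] } ]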
@@ -319,16 +319,27 @@ const getCompletionGoogleVertex = async (config, opts, oauth2Client) => {
   });
   const generativeModel = vertexAI.getGenerativeModel({
     model: config.model,
+    systemInstruction: {
+      role: "system",
+      parts: [{ text: opts.systemPrompt || "You are a helpful assistant." }],
+    },
+    generationConfig: {
+      temperature: config.temperature || 0.7,
+    },
   });
-  const
-
+  const chatParams = {
+    history: convertChatToVertex(opts.chat),
+  };
+  if (opts?.tools?.length > 0) {
+    chatParams.tools = [
       {
-        functionDeclarations: opts.tools.map((t) =>
+        functionDeclarations: opts.tools.map((t) =>
+          prepFuncArgsForChat(t.function)
+        ),
       },
-    ]
-
-
-  });
+    ];
+  }
+  const chat = generativeModel.startChat(chatParams);
   const { response } = await chat.sendMessage([{ text: opts.prompt }]);
   const parts = response?.candidates?.[0]?.content?.parts;
   if (!parts) return "";
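For context, the sketch below shows the call pattern the rewritten function now follows with the @google-cloud/vertexai SDK: the system instruction and generation config are attached to the model, while the history (and optionally tools) go into startChat. Project, location and model values here are placeholders, not values taken from the plugin:

// Minimal sketch of the Vertex AI chat flow used above (placeholder values).
const { VertexAI } = require("@google-cloud/vertexai");

async function askVertex(prompt) {
  const vertexAI = new VertexAI({ project: "my-project", location: "us-central1" });
  const generativeModel = vertexAI.getGenerativeModel({
    model: "gemini-1.5-flash",
    systemInstruction: {
      role: "system",
      parts: [{ text: "You are a helpful assistant." }],
    },
    generationConfig: { temperature: 0.7 },
  });
  const chat = generativeModel.startChat({ history: [] });
  const { response } = await chat.sendMessage([{ text: prompt }]);
  // Join the text parts of the first candidate into a single string.
  return response?.candidates?.[0]?.content?.parts?.map((p) => p.text).join("") || "";
}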
@@ -339,6 +350,7 @@ const getCompletionGoogleVertex = async (config, opts, oauth2Client) => {
     if (part.functionCall) {
       const toolCall = {
         function: prepFuncArgsForChat(part.functionCall),
+        id: Math.floor(Math.random() * 1000000),
       };
       if (!result.tool_calls) result.tool_calls = [toolCall];
       else result.tool_calls.push(toolCall);
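The new id on each tool call is a random integer generated client-side so that downstream code expecting OpenAI-style tool_calls entries (which carry an id) has something to reference; it is not a value returned by Vertex. Assuming prepFuncArgsForChat yields an OpenAI-style function object (an assumption, since that helper is not shown in this diff), a result with one tool call would look roughly like:

// Illustrative shape only; the exact function payload depends on prepFuncArgsForChat.
const result = {
  tool_calls: [
    {
      function: { name: "get_weather", arguments: '{"city":"Paris"}' },
      id: 482913, // Math.floor(Math.random() * 1000000)
    },
  ],
};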
package/index.js
CHANGED
@@ -95,10 +95,28 @@ ${domReady(`
       type: "String",
       showIf: { backend: "Google Vertex AI" },
       attributes: {
-        options: [
+        options: [
+          "gemini-1.5-pro",
+          "gemini-1.5-flash",
+          "gemini-2.0-flash",
+        ],
       },
       required: true,
     },
+    {
+      name: "temperature",
+      label: "Temperature",
+      type: "Float",
+      sublabel:
+        "Controls the randomness of predictions. Higher values make the output more random.",
+      showIf: { backend: "Google Vertex AI" },
+      default: 0.7,
+      attributes: {
+        min: 0,
+        max: 1,
+        decimal_places: 1,
+      },
+    },
     {
       name: "embed_model",
       label: "Embedding model",
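The new Temperature field feeds the generationConfig added in generate.js. A quick sketch of that flow, assuming the saved plugin configuration is passed through unchanged; note that with the config.temperature || 0.7 fallback an explicit value of 0 is treated as unset and becomes 0.7:

// Sketch: how the stored configuration reaches the Vertex call (illustrative values).
const config = { backend: "Google Vertex AI", model: "gemini-2.0-flash", temperature: 0.3 };
const generationConfig = { temperature: config.temperature || 0.7 };
console.log(generationConfig); // { temperature: 0.3 }
console.log({ temperature: 0 || 0.7 }); // { temperature: 0.7 } (an explicit 0 falls back)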
@@ -303,6 +321,11 @@ const routes = (config) => {
     url: "/large-language-model/vertex/authorize",
     method: "get",
     callback: async (req, res) => {
+      const role = req?.user?.role_id || 100;
+      if (role > 1) {
+        req.flash("error", req.__("Not authorized"));
+        return res.redirect("/");
+      }
       const { client_id, client_secret } = config || {};
       const baseUrl = (
         getState().getConfig("base_url") || "http://localhost:3000"
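Both OAuth routes (authorize here, and the callback further down) now start with the same guard. In Saltcorn, lower role ids mean more privilege (1 is admin, 100 is public), so role > 1 limits the flow to administrators, and an unauthenticated request defaults to the public role. A standalone sketch of the check (helper name is illustrative):

// Admin-only guard, same logic as in the diff.
const isAdmin = (req) => (req?.user?.role_id || 100) === 1;

// Usage inside a route callback:
// if (!isAdmin(req)) {
//   req.flash("error", req.__("Not authorized"));
//   return res.redirect("/");
// }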
@@ -316,6 +339,7 @@ const routes = (config) => {
       const authUrl = oauth2Client.generateAuthUrl({
         access_type: "offline",
         scope: "https://www.googleapis.com/auth/cloud-platform",
+        prompt: "consent",
       });
       res.redirect(authUrl);
     },
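Adding prompt: "consent" forces Google's consent screen on every authorization, which together with access_type: "offline" makes Google return a refresh token even when the user has authorized the app before (otherwise the refresh token is only issued on the first grant). A minimal sketch with google-auth-library, using placeholder credentials and redirect URI:

// Sketch of building the authorization URL (placeholder client credentials).
const { OAuth2Client } = require("google-auth-library");

const oauth2Client = new OAuth2Client(
  "CLIENT_ID",
  "CLIENT_SECRET",
  "http://localhost:3000/large-language-model/vertex/callback"
);

const authUrl = oauth2Client.generateAuthUrl({
  access_type: "offline", // request a refresh token
  scope: "https://www.googleapis.com/auth/cloud-platform",
  prompt: "consent", // re-issue the refresh token on repeat authorizations
});
console.log(authUrl);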
@@ -324,6 +348,11 @@ const routes = (config) => {
     url: "/large-language-model/vertex/callback",
     method: "get",
     callback: async (req, res) => {
+      const role = req?.user?.role_id || 100;
+      if (role > 1) {
+        req.flash("error", req.__("Not authorized"));
+        return res.redirect("/");
+      }
       const { client_id, client_secret } = config || {};
       const baseUrl = (
         getState().getConfig("base_url") || "http://localhost:3000"
@@ -355,6 +384,10 @@ const routes = (config) => {
       const newConfig = { ...(plugin.configuration || {}), tokens };
       plugin.configuration = newConfig;
       await plugin.upsert();
+      getState().processSend({
+        refresh_plugin_cfg: plugin.name,
+        tenant: db.getTenantSchema(),
+      });
       req.flash(
         "success",
         req.__("Authentication successful! You can now use Vertex AI.")