@saltcorn/large-language-model 0.7.2 → 0.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/generate.js +66 -6
  2. package/index.js +1 -0
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -272,6 +272,43 @@ const initOAuth2Client = async (config) => {
   return oauth2Client;
 };
 
+const convertChatToVertex = (chat) => {
+  const history = [];
+  for (const message of chat) {
+    const role = message.role === "user" ? "user" : "model";
+    if (message.content) {
+      const parts = [{ text: message.content }];
+      history.push([{ role, parts }]);
+    } else if (message.tool_calls) {
+      const parts = [
+        { functionCall: prepFuncArgsFromChat(message.tool_calls[0].function) },
+      ];
+      history.push([{ role, parts }]);
+    }
+  }
+  return history;
+};
+
+const prepFuncArgsFromChat = (fCall) => {
+  if (!fCall.arguments) return fCall;
+  else {
+    const copy = JSON.parse(JSON.stringify(fCall));
+    copy.args = JSON.parse(copy.arguments);
+    delete copy.arguments;
+    return copy;
+  }
+};
+
+const prepFuncArgsForChat = (fCall) => {
+  if (!fCall.args) return fCall;
+  else {
+    const copy = JSON.parse(JSON.stringify(fCall));
+    copy.arguments = JSON.stringify(copy.args);
+    delete copy.args;
+    return copy;
+  }
+};
+
 const getCompletionGoogleVertex = async (config, opts, oauth2Client) => {
   const vertexAI = new VertexAI({
     project: config.project_id,
@@ -283,13 +320,36 @@ const getCompletionGoogleVertex = async (config, opts, oauth2Client) => {
   const generativeModel = vertexAI.getGenerativeModel({
     model: config.model,
   });
-  const chat = generativeModel.startChat();
-  const result = await chat.sendMessageStream(opts.prompt);
-  const chunks = [];
-  for await (const item of result.stream) {
-    chunks.push(item.candidates[0].content.parts[0].text);
+  const chat = generativeModel.startChat({
+    tools: [
+      {
+        functionDeclarations: opts.tools.map((t) => t.function),
+      },
+    ],
+    history: convertChatToVertex(opts.chat),
+    systemInstructions: opts.systemPrompt || "You are a helpful assistant.",
+  });
+  const { response } = await chat.sendMessage([{ text: opts.prompt }]);
+  const parts = response?.candidates?.[0]?.content?.parts;
+  if (!parts) return "";
+  else if (parts.length === 1 && parts[0].text) return parts[0].text;
+  else {
+    const result = {};
+    for (const part of parts) {
+      if (part.functionCall) {
+        const toolCall = {
+          function: prepFuncArgsForChat(part.functionCall),
+        };
+        if (!result.tool_calls) result.tool_calls = [toolCall];
+        else result.tool_calls.push(toolCall);
+      }
+      if (part.text)
+        result.content = !result.content
+          ? part.text
+          : result.content + part.text;
+    }
+    return result;
   }
-  return chunks.join();
 };
 
 const getEmbeddingGoogleVertex = async (config, opts, oauth2Client) => {
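For readers following the generate.js changes: the plugin's chat history and tool calls use the OpenAI-style shape (a string-encoded arguments field), while the Vertex AI SDK expects and returns functionCall parts whose args is an already-parsed object. The two prepFuncArgs* helpers translate between those shapes in each direction. Below is a minimal sketch of that round trip; the helper bodies are copied from the diff above (comments added), and the get_weather tool call is a hypothetical example, not something in the package.

// Helpers copied from the diff above.
const prepFuncArgsFromChat = (fCall) => {
  if (!fCall.arguments) return fCall;
  else {
    const copy = JSON.parse(JSON.stringify(fCall));
    copy.args = JSON.parse(copy.arguments); // arguments arrives as a JSON string
    delete copy.arguments;
    return copy;
  }
};

const prepFuncArgsForChat = (fCall) => {
  if (!fCall.args) return fCall;
  else {
    const copy = JSON.parse(JSON.stringify(fCall));
    copy.arguments = JSON.stringify(copy.args); // args comes back as a plain object
    delete copy.args;
    return copy;
  }
};

// Hypothetical OpenAI-style tool call, for illustration only.
const openAiCall = { name: "get_weather", arguments: '{"city":"Oslo"}' };

// Outgoing direction: chat history -> Vertex functionCall part.
const vertexCall = prepFuncArgsFromChat(openAiCall);
// { name: "get_weather", args: { city: "Oslo" } }

// Incoming direction: Vertex response part -> tool_calls[n].function.
console.log(prepFuncArgsForChat(vertexCall));
// { name: "get_weather", arguments: '{"city":"Oslo"}' }

Note also the change in return shape: getCompletionGoogleVertex previously joined streamed text chunks into a single string, whereas in 0.7.3 it returns the text directly when the response is a single text part and otherwise an object carrying content and/or tool_calls assembled from the response parts.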
package/index.js CHANGED
@@ -138,6 +138,7 @@ ${domReady(`
   label: "Region",
   sublabel: "Google Cloud region (default: us-central1)",
   type: "String",
+  showIf: { backend: "Google Vertex AI" },
   default: "us-central1",
 },
 {
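The one-line index.js change adds a showIf condition so the Region field only appears in the plugin configuration form when the Google Vertex AI backend is selected; previously it was shown for every backend. For orientation, a sketch of the full field definition is below, with the surrounding lines (not visible in this diff) assumed and the field's name hypothetical.

{
  name: "region", // hypothetical; the real name is outside the diff context
  label: "Region",
  sublabel: "Google Cloud region (default: us-central1)",
  type: "String",
  showIf: { backend: "Google Vertex AI" }, // the added line: hide unless Vertex AI is chosen
  default: "us-central1",
},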
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@saltcorn/large-language-model",
-  "version": "0.7.2",
+  "version": "0.7.3",
   "description": "Large language models and functionality for Saltcorn",
   "main": "index.js",
   "dependencies": {