@saltcorn/large-language-model 0.6.4 → 0.6.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/generate.js +6 -6
  2. package/index.js +199 -3
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -174,12 +174,12 @@ const getCompletionOpenAICompatible = async (
174
174
  console.log("OpenAI response", JSON.stringify(results, null, 2));
175
175
  if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
176
176
 
177
- return (
178
- results?.choices?.[0]?.message?.content ||
179
- (results?.choices?.[0]?.message?.tool_calls
180
- ? { tool_calls: results?.choices?.[0]?.message?.tool_calls }
181
- : null)
182
- );
177
+ return results?.choices?.[0]?.message?.tool_calls
178
+ ? {
179
+ tool_calls: results?.choices?.[0]?.message?.tool_calls,
180
+ content: results?.choices?.[0]?.message?.content || null,
181
+ }
182
+ : results?.choices?.[0]?.message?.content || null;
183
183
  };
184
184
 
185
185
  const getEmbeddingOpenAICompatible = async (
package/index.js CHANGED
@@ -194,6 +194,7 @@ module.exports = {
194
194
  actions: (config) => ({
195
195
  llm_function_call: require("./function-insert-action.js")(config),
196
196
  llm_generate: {
197
+ description: "Generate text with AI based on a text prompt",
197
198
  requireRow: true,
198
199
  configFields: ({ table, mode }) => {
199
200
  const override_fields =
@@ -236,8 +237,7 @@ module.exports = {
236
237
  },
237
238
  ...override_fields,
238
239
  ];
239
- }
240
- if (table) {
240
+ } else if (table) {
241
241
  const textFields = table.fields
242
242
  .filter((f) => f.type?.sql_name === "text")
243
243
  .map((f) => f.name);
@@ -318,7 +318,203 @@ module.exports = {
318
318
  upd[chat_history_field] = [
319
319
  ...history,
320
320
  { role: "user", content: prompt },
321
- { role: "system", content: ans },
321
+ { role: "assistant", content: ans },
322
+ ];
323
+ }
324
+ if (mode === "workflow") return upd;
325
+ else await table.updateRow(upd, row[table.pk_name]);
326
+ },
327
+ },
328
+ llm_generate_json: {
329
+ description: "Generate JSON with AI based on a text prompt. You must specify the JSON fields in the configuration.",
330
+ requireRow: true,
331
+ configFields: ({ table, mode }) => {
332
+ const override_fields =
333
+ config.backend === "OpenAI-compatible API" &&
334
+ (config.altconfigs || []).filter((c) => c.name).length
335
+ ? [
336
+ {
337
+ name: "override_config",
338
+ label: "Alternative LLM configuration",
339
+ type: "String",
340
+ attributes: { options: config.altconfigs.map((c) => c.name) },
341
+ },
342
+ ]
343
+ : [];
344
+ const cfgFields = [];
345
+ const fieldsField = new FieldRepeat({
346
+ name: "fields",
347
+ fields: [
348
+ {
349
+ name: "name",
350
+ label: "Name",
351
+ sublabel: "The field name, as a valid JavaScript identifier",
352
+ type: "String",
353
+ required: true,
354
+ },
355
+ {
356
+ label: "Description",
357
+ name: "description",
358
+ sublabel: "A description of the field.",
359
+ type: "String",
360
+ },
361
+ {
362
+ name: "type",
363
+ label: "Type",
364
+ type: "String",
365
+ required: true,
366
+ attributes: {
367
+ options: ["string", "integer", "number", "boolean"],
368
+ },
369
+ },
370
+ ],
371
+ });
372
+
373
+ if (mode === "workflow") {
374
+ cfgFields.push(
375
+ {
376
+ name: "prompt_template",
377
+ label: "Prompt",
378
+ sublabel:
379
+ "Prompt text. Use interpolations {{ }} to access variables in the context",
380
+ type: "String",
381
+ fieldview: "textarea",
382
+ required: true,
383
+ },
384
+ {
385
+ name: "answer_field",
386
+ label: "Answer variable",
387
+ sublabel: "Set the generated answer to this context variable",
388
+ type: "String",
389
+ required: true,
390
+ },
391
+ {
392
+ name: "chat_history_field",
393
+ label: "Chat history variable",
394
+ sublabel:
395
+ "Use this context variable to store the chat history for subsequent prompts",
396
+ type: "String",
397
+ }
398
+ );
399
+ } else if (table) {
400
+ const jsonFields = table.fields
401
+ .filter((f) => f.type?.name === "JSON")
402
+ .map((f) => f.name);
403
+
404
+ cfgFields.push(
405
+ {
406
+ name: "prompt_template",
407
+ label: "Prompt",
408
+ sublabel:
409
+ "Prompt text. Use interpolations {{ }} to access variables in the row",
410
+ type: "String",
411
+ fieldview: "textarea",
412
+ required: true,
413
+ },
414
+ {
415
+ name: "answer_field",
416
+ label: "Answer field",
417
+ sublabel: "Output field will be set to the generated data",
418
+ type: "String",
419
+ required: true,
420
+ attributes: { options: jsonFields },
421
+ }
422
+ );
423
+ }
424
+
425
+ cfgFields.push(
426
+ ...override_fields,
427
+ {
428
+ name: "multiple",
429
+ label: "Multiple",
430
+ type: "Bool",
431
+ sublabel:
432
+ "Select (true) to generate an array of objects. Unselect (false) for a single object",
433
+ },
434
+ {
435
+ name: "gen_description",
436
+ label: "Description",
437
+ sublabel: "A short description of what you want to generate.",
438
+ type: "String",
439
+ },
440
+ {
441
+ input_type: "section_header",
442
+ label: "JSON fields to generate",
443
+ },
444
+ fieldsField
445
+ );
446
+ return cfgFields;
447
+ },
448
+ run: async ({
449
+ row,
450
+ table,
451
+ user,
452
+ mode,
453
+ configuration: {
454
+ prompt_template,
455
+ fields,
456
+ multiple,
457
+ gen_description,
458
+ answer_field,
459
+ override_config,
460
+ chat_history_field,
461
+ },
462
+ }) => {
463
+ let prompt = interpolate(prompt_template, row, user);
464
+
465
+ const opts = {};
466
+ if (override_config) {
467
+ const altcfg = config.altconfigs.find(
468
+ (c) => c.name === override_config
469
+ );
470
+ opts.endpoint = altcfg.endpoint;
471
+ opts.model = altcfg.model;
472
+ opts.api_key = altcfg.api_key;
473
+ opts.bearer = altcfg.bearer;
474
+ }
475
+ let history = [];
476
+ if (chat_history_field && row[chat_history_field]) {
477
+ history = row[chat_history_field];
478
+ }
479
+ const fieldArgs = {};
480
+ (fields || []).forEach((field) => {
481
+ fieldArgs[field.name] = {
482
+ type: field.type,
483
+ description: field.description,
484
+ };
485
+ });
486
+ const argObj = { type: "object", properties: fieldArgs };
487
+ const args = {
488
+ [answer_field]: multiple ? { type: "array", items: argObj } : argObj,
489
+ };
490
+ const expert_function = {
491
+ type: "function",
492
+ function: {
493
+ name: answer_field,
494
+ description: gen_description || undefined,
495
+ parameters: {
496
+ type: "object",
497
+ properties: args,
498
+ },
499
+ },
500
+ };
501
+ const toolargs = {
502
+ tools: [expert_function],
503
+ tool_choice: { type: "function", function: { name: answer_field } },
504
+ };
505
+ const compl = await getCompletion(config, {
506
+ prompt,
507
+ chat: history,
508
+ ...opts,
509
+ ...toolargs,
510
+ });
511
+ const ans = JSON.parse(compl.tool_calls[0].function.arguments)[answer_field];
512
+ const upd = { [answer_field]: ans };
513
+ if (chat_history_field) {
514
+ upd[chat_history_field] = [
515
+ ...history,
516
+ { role: "user", content: prompt },
517
+ { role: "assistant", content: ans },
322
518
  ];
323
519
  }
324
520
  if (mode === "workflow") return upd;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@saltcorn/large-language-model",
3
- "version": "0.6.4",
3
+ "version": "0.6.6",
4
4
  "description": "Large language models and functionality for Saltcorn",
5
5
  "main": "index.js",
6
6
  "dependencies": {