@saltcorn/large-language-model 0.8.4 → 0.8.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/generate.js +62 -1
- package/index.js +145 -3
- package/package.json +1 -1
package/generate.js
CHANGED
|
@@ -78,6 +78,23 @@ const getEmbedding = async (config, opts) => {
|
|
|
78
78
|
}
|
|
79
79
|
};
|
|
80
80
|
|
|
81
|
+
/**
 * Dispatch image generation to the configured LLM backend.
 *
 * @param {object} config plugin configuration (backend, api_key, image_model, …)
 * @param {object} opts per-call options: prompt, model, size, quality, n, …
 * @returns {Promise<object>} first element of the provider's `data` array
 *   (an object containing `url` or `b64_json`)
 * @throws {Error} when the configured backend has no image generation support
 */
const getImageGeneration = async (config, opts) => {
  switch (config.backend) {
    case "OpenAI":
      return await getImageGenOpenAICompatible(
        {
          imageEndpoint: "https://api.openai.com/v1/images/generations",
          bearer: opts?.api_key || opts?.bearer || config.api_key,
          // Fix: getImageGenOpenAICompatible destructures `image_model` (not
          // `model`) from its config argument, so the configured image model
          // must be passed under that key — previously it was passed as
          // `model` and silently ignored. Per-call overrides still arrive
          // via opts.model, which takes precedence inside the helper.
          image_model: config.image_model || config.model,
          responses_api: config.responses_api,
        },
        opts
      );
    default:
      throw new Error("Image generation not implemented for this backend");
  }
};
|
|
97
|
+
|
|
81
98
|
const getCompletion = async (config, opts) => {
|
|
82
99
|
switch (config.backend) {
|
|
83
100
|
case "OpenAI":
|
|
@@ -318,6 +335,50 @@ const getCompletionOpenAICompatible = async (
|
|
|
318
335
|
|
|
319
336
|
const emptyToUndefined = (xs) => (xs.length ? xs : undefined);
|
|
320
337
|
|
|
338
|
+
/**
 * Call an OpenAI-compatible image generation endpoint.
 *
 * @param {object} config endpoint settings:
 *   - imageEndpoint: full URL of the images/generations endpoint
 *   - bearer: token for the Authorization header (OpenAI style)
 *   - apiKey: value for the "api-key" header (Azure style)
 *   - image_model: default model used when opts.model is not given
 * @param {object} opts per-request options (prompt, model, size, quality,
 *   n, output_format, response_format, debugResult)
 * @returns {Promise<object>} first element of the response `data` array —
 *   an object with `url` or `b64_json` depending on the response format
 * @throws {Error} when the provider reports an error in the JSON body
 */
const getImageGenOpenAICompatible = async (
  config,
  {
    prompt,
    model,
    debugResult,
    size,
    quality,
    n,
    output_format,
    response_format,
  }
) => {
  const { imageEndpoint, bearer, apiKey, image_model } = config;
  const headers = {
    "Content-Type": "application/json",
    Accept: "application/json",
  };
  if (bearer) headers.Authorization = "Bearer " + bearer;
  if (apiKey) headers["api-key"] = apiKey;
  const body = {
    model: model || image_model || "gpt-image-1",
    prompt,
    size: size || "1024x1024",
    n: n || 1,
  };
  // Optional parameters are only sent when explicitly requested, since
  // some models reject unsupported fields (e.g. gpt-image-1 rejects
  // response_format). The redundant `if (n) body.n = n;` duplicate of the
  // assignment above has been removed.
  if (quality) body.quality = quality;
  if (output_format) body.output_format = output_format;
  if (response_format) body.response_format = response_format;
  if (debugResult) console.log("OpenAI image request", imageEndpoint, body);

  const rawResponse = await fetch(imageEndpoint, {
    method: "POST",
    headers,
    body: JSON.stringify(body),
  });
  const results = await rawResponse.json();
  if (debugResult) console.log("OpenAI image response", results);
  if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
  // NOTE(review): only the first image is returned even when n > 1 —
  // callers currently expect a single {url}/{b64_json} object.
  return results?.data?.[0];
};
|
|
381
|
+
|
|
321
382
|
const getEmbeddingOpenAICompatible = async (
|
|
322
383
|
config,
|
|
323
384
|
{ prompt, model, debugResult }
|
|
@@ -518,4 +579,4 @@ const getEmbeddingGoogleVertex = async (config, opts, oauth2Client) => {
|
|
|
518
579
|
return embeddings;
|
|
519
580
|
};
|
|
520
581
|
|
|
521
|
-
module.exports = { getCompletion, getEmbedding };
|
|
582
|
+
// Public API of generate.js: completion, embedding and image generation.
module.exports = { getCompletion, getEmbedding, getImageGeneration };
|
package/index.js
CHANGED
|
@@ -1,10 +1,15 @@
|
|
|
1
1
|
const Workflow = require("@saltcorn/data/models/workflow");
|
|
2
2
|
const Form = require("@saltcorn/data/models/form");
|
|
3
|
+
const File = require("@saltcorn/data/models/file");
|
|
3
4
|
const FieldRepeat = require("@saltcorn/data/models/fieldrepeat");
|
|
4
5
|
const Plugin = require("@saltcorn/data/models/plugin");
|
|
5
6
|
const { domReady } = require("@saltcorn/markup/tags");
|
|
6
7
|
const db = require("@saltcorn/data/db");
|
|
7
|
-
const {
|
|
8
|
+
const {
|
|
9
|
+
getCompletion,
|
|
10
|
+
getEmbedding,
|
|
11
|
+
getImageGeneration,
|
|
12
|
+
} = require("./generate");
|
|
8
13
|
const { OPENAI_MODELS } = require("./constants.js");
|
|
9
14
|
const { eval_expression } = require("@saltcorn/data/models/expression");
|
|
10
15
|
const { interpolate } = require("@saltcorn/data/utils");
|
|
@@ -126,7 +131,17 @@ ${domReady(`
|
|
|
126
131
|
],
|
|
127
132
|
},
|
|
128
133
|
},
|
|
129
|
-
|
|
134
|
+
// Configuration field selecting which OpenAI image model the
// image-generation actions use (only shown for the OpenAI backend).
// (Previous inline comment "//gpt-3.5-turbo" was a stale copy-paste from
// the text-model field and has been removed.)
{
  name: "image_model",
  label: "Image model",
  type: "String",
  required: true,
  showIf: { backend: "OpenAI" },
  attributes: {
    options: ["gpt-image-1", "dall-e-2", "dall-e-3"],
  },
},
|
|
144
|
+
{
|
|
130
145
|
name: "client_id",
|
|
131
146
|
label: "Client ID",
|
|
132
147
|
sublabel: "OAuth2 client ID from your Google Cloud account",
|
|
@@ -219,7 +234,7 @@ ${domReady(`
|
|
|
219
234
|
showIf: { backend: "Google Vertex AI" },
|
|
220
235
|
default: "us-central1",
|
|
221
236
|
},
|
|
222
|
-
|
|
237
|
+
|
|
223
238
|
{
|
|
224
239
|
name: "bearer_auth",
|
|
225
240
|
label: "Bearer Auth",
|
|
@@ -318,6 +333,15 @@ const functions = (config) => {
|
|
|
318
333
|
description: "Generate text with GPT",
|
|
319
334
|
arguments: [{ name: "prompt", type: "String" }],
|
|
320
335
|
},
|
|
336
|
+
llm_image_generate: {
|
|
337
|
+
run: async (prompt, opts) => {
|
|
338
|
+
const result = await getImageGeneration(config, { prompt, ...opts });
|
|
339
|
+
return result;
|
|
340
|
+
},
|
|
341
|
+
isAsync: true,
|
|
342
|
+
description: "Generate image",
|
|
343
|
+
arguments: [{ name: "prompt", type: "String" }],
|
|
344
|
+
},
|
|
321
345
|
llm_embedding: {
|
|
322
346
|
run: async (prompt, opts) => {
|
|
323
347
|
const result = await getEmbedding(config, { prompt, ...opts });
|
|
@@ -567,6 +591,124 @@ module.exports = {
|
|
|
567
591
|
else await table.updateRow(upd, row[table.pk_name]);
|
|
568
592
|
},
|
|
569
593
|
},
|
|
594
|
+
llm_generate_image: {
|
|
595
|
+
description: "Generate image with AI based on a text prompt",
|
|
596
|
+
requireRow: true,
|
|
597
|
+
configFields: ({ table, mode }) => {
|
|
598
|
+
if (mode === "workflow") {
|
|
599
|
+
return [
|
|
600
|
+
{
|
|
601
|
+
name: "prompt_template",
|
|
602
|
+
label: "Prompt",
|
|
603
|
+
sublabel:
|
|
604
|
+
"Prompt text. Use interpolations {{ }} to access variables in the context",
|
|
605
|
+
type: "String",
|
|
606
|
+
fieldview: "textarea",
|
|
607
|
+
required: true,
|
|
608
|
+
},
|
|
609
|
+
{
|
|
610
|
+
name: "answer_field",
|
|
611
|
+
label: "Answer variable",
|
|
612
|
+
sublabel:
|
|
613
|
+
"Set the generated image filename to this context variable",
|
|
614
|
+
type: "String",
|
|
615
|
+
required: true,
|
|
616
|
+
},
|
|
617
|
+
{
|
|
618
|
+
name: "model",
|
|
619
|
+
label: "Model",
|
|
620
|
+
sublabel: "Override default model name",
|
|
621
|
+
type: "String",
|
|
622
|
+
},
|
|
623
|
+
];
|
|
624
|
+
} else if (table) {
|
|
625
|
+
const textFields = table.fields
|
|
626
|
+
.filter((f) => f.type?.sql_name === "text")
|
|
627
|
+
.map((f) => f.name);
|
|
628
|
+
const fileFields = table.fields
|
|
629
|
+
.filter((f) => f.type === "File")
|
|
630
|
+
.map((f) => f.name);
|
|
631
|
+
|
|
632
|
+
return [
|
|
633
|
+
{
|
|
634
|
+
name: "prompt_field",
|
|
635
|
+
label: "Prompt field",
|
|
636
|
+
sublabel: "Field with the text of the prompt",
|
|
637
|
+
type: "String",
|
|
638
|
+
required: true,
|
|
639
|
+
attributes: { options: [...textFields, "Formula"] },
|
|
640
|
+
},
|
|
641
|
+
{
|
|
642
|
+
name: "prompt_formula",
|
|
643
|
+
label: "Prompt formula",
|
|
644
|
+
type: "String",
|
|
645
|
+
showIf: { prompt_field: "Formula" },
|
|
646
|
+
},
|
|
647
|
+
{
|
|
648
|
+
name: "answer_field",
|
|
649
|
+
label: "Answer field",
|
|
650
|
+
sublabel: "Output field will be set to the generated image file",
|
|
651
|
+
type: "String",
|
|
652
|
+
required: true,
|
|
653
|
+
attributes: { options: fileFields },
|
|
654
|
+
},
|
|
655
|
+
];
|
|
656
|
+
}
|
|
657
|
+
},
|
|
658
|
+
run: async ({
|
|
659
|
+
row,
|
|
660
|
+
table,
|
|
661
|
+
user,
|
|
662
|
+
mode,
|
|
663
|
+
configuration: {
|
|
664
|
+
prompt_field,
|
|
665
|
+
prompt_formula,
|
|
666
|
+
prompt_template,
|
|
667
|
+
answer_field,
|
|
668
|
+
override_config,
|
|
669
|
+
chat_history_field,
|
|
670
|
+
model,
|
|
671
|
+
},
|
|
672
|
+
}) => {
|
|
673
|
+
let prompt;
|
|
674
|
+
if (mode === "workflow")
|
|
675
|
+
prompt = interpolate(prompt_template, row, user);
|
|
676
|
+
else if (prompt_field === "Formula" || mode === "workflow")
|
|
677
|
+
prompt = eval_expression(
|
|
678
|
+
prompt_formula,
|
|
679
|
+
row,
|
|
680
|
+
user,
|
|
681
|
+
"llm_generate prompt formula"
|
|
682
|
+
);
|
|
683
|
+
else prompt = row[prompt_field];
|
|
684
|
+
const opts = { debugResult: true }; // response_format: "b64_json" };
|
|
685
|
+
|
|
686
|
+
if (model) opts.model = model;
|
|
687
|
+
let history = [];
|
|
688
|
+
|
|
689
|
+
const ans = await getImageGeneration(config, {
|
|
690
|
+
prompt,
|
|
691
|
+
...opts,
|
|
692
|
+
});
|
|
693
|
+
const upd = {};
|
|
694
|
+
|
|
695
|
+
if (ans.url) {
|
|
696
|
+
//fetch url
|
|
697
|
+
} else if (ans.b64_json) {
|
|
698
|
+
const imgContents = Buffer.from(ans.b64_json, "base64");
|
|
699
|
+
const file = await File.from_contents(
|
|
700
|
+
"generated.png",
|
|
701
|
+
"image/png",
|
|
702
|
+
imgContents,
|
|
703
|
+
user?.id
|
|
704
|
+
);
|
|
705
|
+
upd[answer_field] = file.path_to_serve;
|
|
706
|
+
}
|
|
707
|
+
if (mode === "workflow") return upd;
|
|
708
|
+
else await table.updateRow(upd, row[table.pk_name]);
|
|
709
|
+
},
|
|
710
|
+
},
|
|
711
|
+
|
|
570
712
|
llm_generate_json: {
|
|
571
713
|
description:
|
|
572
714
|
"Generate JSON with AI based on a text prompt. You must sppecify the JSON fields in the configuration.",
|